grsecurity-2.9.1-3.8.2-201303091131.patch
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index b89a739..b47493f 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -69,6 +75,7 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52@@ -80,6 +87,7 @@ aic7*seq.h*
53 aicasm
54 aicdb.h*
55 altivec*.c
56+ashldi3.S
57 asm-offsets.h
58 asm_offsets.h
59 autoconf.h*
60@@ -92,19 +100,24 @@ bounds.h
61 bsetup
62 btfixupprep
63 build
64+builtin-policy.h
65 bvmlinux
66 bzImage*
67 capability_names.h
68 capflags.c
69 classlist.h*
70+clut_vga16.c
71+common-cmds.h
72 comp*.log
73 compile.h*
74 conf
75 config
76 config-*
77 config_data.h*
78+config.c
79 config.mak
80 config.mak.autogen
81+config.tmp
82 conmakehash
83 consolemap_deftbl.c*
84 cpustr.h
85@@ -115,9 +128,11 @@ devlist.h*
86 dnotify_test
87 docproc
88 dslm
89+dtc-lexer.lex.c
90 elf2ecoff
91 elfconfig.h*
92 evergreen_reg_safe.h
93+exception_policy.conf
94 fixdep
95 flask.h
96 fore200e_mkfirm
97@@ -125,12 +140,15 @@ fore200e_pca_fw.c*
98 gconf
99 gconf.glade.h
100 gen-devlist
101+gen-kdb_cmds.c
102 gen_crc32table
103 gen_init_cpio
104 generated
105 genheaders
106 genksyms
107 *_gray256.c
108+hash
109+hid-example
110 hpet_example
111 hugepage-mmap
112 hugepage-shm
113@@ -145,14 +163,14 @@ int32.c
114 int4.c
115 int8.c
116 kallsyms
117-kconfig
118+kern_constants.h
119 keywords.c
120 ksym.c*
121 ksym.h*
122 kxgettext
123 lex.c
124 lex.*.c
125-linux
126+lib1funcs.S
127 logo_*.c
128 logo_*_clut224.c
129 logo_*_mono.c
130@@ -162,14 +180,15 @@ mach-types.h
131 machtypes.h
132 map
133 map_hugetlb
134-media
135 mconf
136+mdp
137 miboot*
138 mk_elfconfig
139 mkboot
140 mkbugboot
141 mkcpustr
142 mkdep
143+mkpiggy
144 mkprep
145 mkregtable
146 mktables
147@@ -185,6 +204,8 @@ oui.c*
148 page-types
149 parse.c
150 parse.h
151+parse-events*
152+pasyms.h
153 patches*
154 pca200e.bin
155 pca200e_ecd.bin2
156@@ -194,6 +215,7 @@ perf-archive
157 piggyback
158 piggy.gzip
159 piggy.S
160+pmu-*
161 pnmtologo
162 ppc_defs.h*
163 pss_boot.h
164@@ -203,7 +225,10 @@ r200_reg_safe.h
165 r300_reg_safe.h
166 r420_reg_safe.h
167 r600_reg_safe.h
168+realmode.lds
169+realmode.relocs
170 recordmcount
171+regdb.c
172 relocs
173 rlim_names.h
174 rn50_reg_safe.h
175@@ -213,8 +238,12 @@ series
176 setup
177 setup.bin
178 setup.elf
179+signing_key*
180+size_overflow_hash.h
181 sImage
182+slabinfo
183 sm_tbl*
184+sortextable
185 split-include
186 syscalltab.h
187 tables.c
188@@ -224,6 +253,7 @@ tftpboot.img
189 timeconst.h
190 times.h*
191 trix_boot.h
192+user_constants.h
193 utsrelease.h*
194 vdso-syms.lds
195 vdso.lds
196@@ -235,13 +265,17 @@ vdso32.lds
197 vdso32.so.dbg
198 vdso64.lds
199 vdso64.so.dbg
200+vdsox32.lds
201+vdsox32-syms.lds
202 version.h*
203 vmImage
204 vmlinux
205 vmlinux-*
206 vmlinux.aout
207 vmlinux.bin.all
208+vmlinux.bin.bz2
209 vmlinux.lds
210+vmlinux.relocs
211 vmlinuz
212 voffset.h
213 vsyscall.lds
214@@ -249,9 +283,12 @@ vsyscall_32.lds
215 wanxlfw.inc
216 uImage
217 unifdef
218+utsrelease.h
219 wakeup.bin
220 wakeup.elf
221 wakeup.lds
222+x509*
223 zImage*
224 zconf.hash.c
225+zconf.lex.c
226 zoffset.h
227diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
228index 986614d..0afd461 100644
229--- a/Documentation/kernel-parameters.txt
230+++ b/Documentation/kernel-parameters.txt
231@@ -922,6 +922,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
232 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
233 Default: 1024
234
235+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
236+ ignore grsecurity's /proc restrictions
237+
238+
239 hashdist= [KNL,NUMA] Large hashes allocated during boot
240 are distributed across NUMA nodes. Defaults on
241 for 64-bit NUMA, off otherwise.
242@@ -2121,6 +2125,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
243 the specified number of seconds. This is to be used if
244 your oopses keep scrolling off the screen.
245
246+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
247+ virtualization environments that don't cope well with the
248+ expand down segment used by UDEREF on X86-32 or the frequent
249+ page table updates on X86-64.
250+
251+ pax_softmode= 0/1 to disable/enable PaX softmode at boot time.
252+
253 pcbit= [HW,ISDN]
254
255 pcd. [PARIDE]
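
For context, boot parameters like grsec_proc_gid= and pax_softmode= above are normally consumed by an early setup handler registered with __setup() or early_param(). A minimal sketch of that idiom, assuming a hypothetical handler and flag name (this is not code from the patch):

    #include <linux/init.h>
    #include <linux/kernel.h>

    /* Hypothetical flag; the real patch keeps its own state elsewhere. */
    static int pax_softmode_enabled;

    /* Sketch of the usual __setup() idiom for a pax_softmode=0/1 option. */
    static int __init setup_pax_softmode(char *str)
    {
        if (kstrtoint(str, 10, &pax_softmode_enabled))
            return 0;    /* malformed value: not handled */
        return 1;        /* handled */
    }
    __setup("pax_softmode=", setup_pax_softmode);
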
256diff --git a/Makefile b/Makefile
257index 20d5318..d5cec9c 100644
258--- a/Makefile
259+++ b/Makefile
260@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
261
262 HOSTCC = gcc
263 HOSTCXX = g++
264-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
265-HOSTCXXFLAGS = -O2
266+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
267+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
268+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
269
270 # Decide whether to build built-in, modular, or both.
271 # Normally, just do built-in.
272@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
273 # Rules shared between *config targets and build targets
274
275 # Basic helpers built in scripts/
276-PHONY += scripts_basic
277-scripts_basic:
278+PHONY += scripts_basic gcc-plugins
279+scripts_basic: gcc-plugins
280 $(Q)$(MAKE) $(build)=scripts/basic
281 $(Q)rm -f .tmp_quiet_recordmcount
282
283@@ -575,6 +576,62 @@ else
284 KBUILD_CFLAGS += -O2
285 endif
286
287+ifndef DISABLE_PAX_PLUGINS
288+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
289+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
290+else
291+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
292+endif
293+ifneq ($(PLUGINCC),)
294+ifndef CONFIG_UML
295+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
296+endif
297+ifdef CONFIG_PAX_MEMORY_STACKLEAK
298+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
299+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
300+endif
301+ifdef CONFIG_KALLOCSTAT_PLUGIN
302+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
303+endif
304+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
305+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
306+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
307+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
308+endif
309+ifdef CONFIG_CHECKER_PLUGIN
310+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
311+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
312+endif
313+endif
314+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
315+ifdef CONFIG_PAX_SIZE_OVERFLOW
316+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
317+endif
318+ifdef CONFIG_PAX_LATENT_ENTROPY
319+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
320+endif
321+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
322+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
323+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS)
324+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
325+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
326+ifeq ($(KBUILD_EXTMOD),)
327+gcc-plugins:
328+ $(Q)$(MAKE) $(build)=tools/gcc
329+else
330+gcc-plugins: ;
331+endif
332+else
333+gcc-plugins:
334+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
335+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
336+else
337+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
338+endif
339+ $(Q)echo "PAX_MEMORY_STACKLEAK and other features will be less secure"
340+endif
341+endif
342+
343 include $(srctree)/arch/$(SRCARCH)/Makefile
344
345 ifdef CONFIG_READABLE_ASM
346@@ -731,7 +788,7 @@ export mod_sign_cmd
347
348
349 ifeq ($(KBUILD_EXTMOD),)
350-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
351+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
352
353 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
354 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
355@@ -778,6 +835,8 @@ endif
356
357 # The actual objects are generated when descending,
358 # make sure no implicit rule kicks in
359+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
360+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
361 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
362
363 # Handle descending into subdirectories listed in $(vmlinux-dirs)
364@@ -787,7 +846,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
365 # Error messages still appears in the original language
366
367 PHONY += $(vmlinux-dirs)
368-$(vmlinux-dirs): prepare scripts
369+$(vmlinux-dirs): gcc-plugins prepare scripts
370 $(Q)$(MAKE) $(build)=$@
371
372 # Store (new) KERNELRELASE string in include/config/kernel.release
373@@ -831,6 +890,7 @@ prepare0: archprepare FORCE
374 $(Q)$(MAKE) $(build)=.
375
376 # All the preparing..
377+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
378 prepare: prepare0
379
380 # Generate some files
381@@ -938,6 +998,8 @@ all: modules
382 # using awk while concatenating to the final file.
383
384 PHONY += modules
385+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
386+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
387 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
388 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
389 @$(kecho) ' Building modules, stage 2.';
390@@ -953,7 +1015,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
391
392 # Target to prepare building external modules
393 PHONY += modules_prepare
394-modules_prepare: prepare scripts
395+modules_prepare: gcc-plugins prepare scripts
396
397 # Target to install modules
398 PHONY += modules_install
399@@ -1019,7 +1081,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
400 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
401 signing_key.priv signing_key.x509 x509.genkey \
402 extra_certificates signing_key.x509.keyid \
403- signing_key.x509.signer
404+ signing_key.x509.signer tools/gcc/size_overflow_hash.h
405
406 # clean - Delete most, but leave enough to build external modules
407 #
408@@ -1059,6 +1121,7 @@ distclean: mrproper
409 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
410 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
411 -o -name '.*.rej' \
412+ -o -name '.*.rej' -o -name '*.so' \
413 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
414 -type f -print | xargs rm -f
415
416@@ -1219,6 +1282,8 @@ PHONY += $(module-dirs) modules
417 $(module-dirs): crmodverdir $(objtree)/Module.symvers
418 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
419
420+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
421+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
422 modules: $(module-dirs)
423 @$(kecho) ' Building modules, stage 2.';
424 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
425@@ -1355,17 +1420,21 @@ else
426 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
427 endif
428
429-%.s: %.c prepare scripts FORCE
430+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
431+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
432+%.s: %.c gcc-plugins prepare scripts FORCE
433 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
434 %.i: %.c prepare scripts FORCE
435 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
436-%.o: %.c prepare scripts FORCE
437+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
438+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
439+%.o: %.c gcc-plugins prepare scripts FORCE
440 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
441 %.lst: %.c prepare scripts FORCE
442 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
443-%.s: %.S prepare scripts FORCE
444+%.s: %.S gcc-plugins prepare scripts FORCE
445 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
446-%.o: %.S prepare scripts FORCE
447+%.o: %.S gcc-plugins prepare scripts FORCE
448 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
449 %.symtypes: %.c prepare scripts FORCE
450 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
451@@ -1375,11 +1444,15 @@ endif
452 $(cmd_crmodverdir)
453 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
454 $(build)=$(build-dir)
455-%/: prepare scripts FORCE
456+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
457+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
458+%/: gcc-plugins prepare scripts FORCE
459 $(cmd_crmodverdir)
460 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
461 $(build)=$(build-dir)
462-%.ko: prepare scripts FORCE
463+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
464+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
465+%.ko: gcc-plugins prepare scripts FORCE
466 $(cmd_crmodverdir)
467 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
468 $(build)=$(build-dir) $(@:.ko=.o)
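
The PLUGINCC machinery above only probes whether the host compiler can build GCC plugins; the plugins themselves live under tools/gcc. For orientation, a minimal GCC plugin skeleton against the documented plugin API looks roughly like this (a sketch, not code from this patch):

    #include "gcc-plugin.h"
    #include "plugin-version.h"

    /* GCC refuses to load plugins that do not declare a GPL-compatible
     * license, which is why every PaX plugin exports this symbol. */
    int plugin_is_GPL_compatible;

    int plugin_init(struct plugin_name_args *plugin_info,
                    struct plugin_gcc_version *version)
    {
        /* Reject a mismatched compiler before registering any passes. */
        if (!plugin_default_version_check(version, &gcc_version))
            return 1;
        /* A real plugin would register its passes/callbacks here. */
        return 0;
    }
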
469diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
470index c2cbe4f..f7264b4 100644
471--- a/arch/alpha/include/asm/atomic.h
472+++ b/arch/alpha/include/asm/atomic.h
473@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
474 #define atomic_dec(v) atomic_sub(1,(v))
475 #define atomic64_dec(v) atomic64_sub(1,(v))
476
477+#define atomic64_read_unchecked(v) atomic64_read(v)
478+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
479+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
480+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
481+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
482+#define atomic64_inc_unchecked(v) atomic64_inc(v)
483+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
484+#define atomic64_dec_unchecked(v) atomic64_dec(v)
485+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
486+
487 #define smp_mb__before_atomic_dec() smp_mb()
488 #define smp_mb__after_atomic_dec() smp_mb()
489 #define smp_mb__before_atomic_inc() smp_mb()
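
On alpha the *_unchecked operations simply alias the plain ones, because PAX_REFCOUNT overflow trapping is not implemented for this architecture; the type split still matters at the call sites. A hedged illustration of the intended division of labour (the operation names are the patch's, the usage is illustrative):

    /* A reference count must trap on overflow, so it stays atomic_t
     * and gets the checked operations ... */
    static atomic_t obj_refcount = ATOMIC_INIT(1);

    /* ... while a statistics counter may legitimately wrap, so it is
     * declared atomic_unchecked_t and uses the _unchecked variants. */
    static atomic_unchecked_t rx_packets = ATOMIC_INIT(0);

    static void on_receive(void)
    {
        atomic_inc(&obj_refcount);          /* overflow would be a bug */
        atomic_inc_unchecked(&rx_packets);  /* wrap-around is harmless */
    }
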
490diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
491index ad368a9..fbe0f25 100644
492--- a/arch/alpha/include/asm/cache.h
493+++ b/arch/alpha/include/asm/cache.h
494@@ -4,19 +4,19 @@
495 #ifndef __ARCH_ALPHA_CACHE_H
496 #define __ARCH_ALPHA_CACHE_H
497
498+#include <linux/const.h>
499
500 /* Bytes per L1 (data) cache line. */
501 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
502-# define L1_CACHE_BYTES 64
503 # define L1_CACHE_SHIFT 6
504 #else
505 /* Both EV4 and EV5 are write-through, read-allocate,
506 direct-mapped, physical.
507 */
508-# define L1_CACHE_BYTES 32
509 # define L1_CACHE_SHIFT 5
510 #endif
511
512+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
513 #define SMP_CACHE_BYTES L1_CACHE_BYTES
514
515 #endif
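
The switch to _AC(1,UL) << L1_CACHE_SHIFT uses the _AC() helper from the newly included linux/const.h, which appends the UL suffix only when compiling C, so the same header stays usable from assembly. Paraphrasing the real definition:

    /* From linux/const.h (paraphrased): */
    #ifdef __ASSEMBLY__
    #define _AC(X, Y)   X              /* asm: plain constant */
    #else
    #define __AC(X, Y)  (X##Y)
    #define _AC(X, Y)   __AC(X, Y)     /* C: 1UL, a proper unsigned long */
    #endif

    /* So in C, L1_CACHE_BYTES becomes (1UL << 6) == 64UL, while the
     * assembler sees (1 << 6); the old literal 64 lacked the UL typing. */
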
516diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
517index 968d999..d36b2df 100644
518--- a/arch/alpha/include/asm/elf.h
519+++ b/arch/alpha/include/asm/elf.h
520@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
521
522 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
523
524+#ifdef CONFIG_PAX_ASLR
525+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
526+
527+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
528+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
529+#endif
530+
531 /* $0 is set by ld.so to a pointer to a function which might be
532 registered using atexit. This provides a mean for the dynamic
533 linker to call DT_FINI functions for shared libraries that have
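
The PAX_DELTA_* values are the number of random bits applied at page granularity. Taking the 64-bit personality case as an example, 28 bits of mmap delta randomize the mapping base over a 2^28-page window; with alpha's 8 KB pages that is a 2 TB span. Back-of-the-envelope, as an illustration only:

    #define PAGE_SHIFT_ALPHA  13   /* 8 KB pages on alpha */
    #define MMAP_DELTA_BITS   28   /* PAX_DELTA_MMAP_LEN, 64-bit case */

    /* Randomized span covered by the mmap delta:
     * 2^28 pages of 2^13 bytes each, i.e. a 2^41-byte (2 TB) window. */
    static const unsigned long mmap_rand_span =
        (1UL << MMAP_DELTA_BITS) << PAGE_SHIFT_ALPHA;
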
534diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
535index bc2a0da..8ad11ee 100644
536--- a/arch/alpha/include/asm/pgalloc.h
537+++ b/arch/alpha/include/asm/pgalloc.h
538@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
539 pgd_set(pgd, pmd);
540 }
541
542+static inline void
543+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
544+{
545+ pgd_populate(mm, pgd, pmd);
546+}
547+
548 extern pgd_t *pgd_alloc(struct mm_struct *mm);
549
550 static inline void
551diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
552index 81a4342..348b927 100644
553--- a/arch/alpha/include/asm/pgtable.h
554+++ b/arch/alpha/include/asm/pgtable.h
555@@ -102,6 +102,17 @@ struct vm_area_struct;
556 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
557 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
558 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
559+
560+#ifdef CONFIG_PAX_PAGEEXEC
561+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
562+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
563+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
564+#else
565+# define PAGE_SHARED_NOEXEC PAGE_SHARED
566+# define PAGE_COPY_NOEXEC PAGE_COPY
567+# define PAGE_READONLY_NOEXEC PAGE_READONLY
568+#endif
569+
570 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
571
572 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
573diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
574index 2fd00b7..cfd5069 100644
575--- a/arch/alpha/kernel/module.c
576+++ b/arch/alpha/kernel/module.c
577@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
578
579 /* The small sections were sorted to the end of the segment.
580 The following should definitely cover them. */
581- gp = (u64)me->module_core + me->core_size - 0x8000;
582+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
583 got = sechdrs[me->arch.gotsecindex].sh_addr;
584
585 for (i = 0; i < n; i++) {
586diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
587index 14db93e..47bed62 100644
588--- a/arch/alpha/kernel/osf_sys.c
589+++ b/arch/alpha/kernel/osf_sys.c
590@@ -1295,16 +1295,16 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
591 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
592
593 static unsigned long
594-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
595- unsigned long limit)
596+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
597+ unsigned long limit, unsigned long flags)
598 {
599 struct vm_area_struct *vma = find_vma(current->mm, addr);
600-
601+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
602 while (1) {
603 /* At this point: (!vma || addr < vma->vm_end). */
604 if (limit - len < addr)
605 return -ENOMEM;
606- if (!vma || addr + len <= vma->vm_start)
607+ if (check_heap_stack_gap(vma, addr, len, offset))
608 return addr;
609 addr = vma->vm_end;
610 vma = vma->vm_next;
611@@ -1340,20 +1340,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
612 merely specific addresses, but regions of memory -- perhaps
613 this feature should be incorporated into all ports? */
614
615+#ifdef CONFIG_PAX_RANDMMAP
616+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
617+#endif
618+
619 if (addr) {
620- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
621+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
622 if (addr != (unsigned long) -ENOMEM)
623 return addr;
624 }
625
626 /* Next, try allocating at TASK_UNMAPPED_BASE. */
627- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
628- len, limit);
629+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
630+
631 if (addr != (unsigned long) -ENOMEM)
632 return addr;
633
634 /* Finally, try allocating in low memory. */
635- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
636+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
637
638 return addr;
639 }
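
The replaced test (!vma || addr + len <= vma->vm_start) only guaranteed that the candidate range does not collide with the next vma; check_heap_stack_gap(), supplied elsewhere in this patch, additionally enforces a guard gap below stack-like mappings and folds in the randomized offset. A simplified sketch of the idea, not the patch's exact implementation:

    /* Simplified gap check; the real helper also accounts for the
     * randomized thread-stack offset passed in from the caller. */
    static int gap_ok(const struct vm_area_struct *vma,
                      unsigned long addr, unsigned long len,
                      unsigned long gap)
    {
        if (!vma)
            return 1;                    /* nothing mapped above us */
        if (addr + len > vma->vm_start)
            return 0;                    /* plain overlap */
        if ((vma->vm_flags & VM_GROWSDOWN) &&
            addr + len + gap > vma->vm_start)
            return 0;                    /* too close to a stack */
        return 1;
    }
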
640diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
641index 0c4132d..88f0d53 100644
642--- a/arch/alpha/mm/fault.c
643+++ b/arch/alpha/mm/fault.c
644@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
645 __reload_thread(pcb);
646 }
647
648+#ifdef CONFIG_PAX_PAGEEXEC
649+/*
650+ * PaX: decide what to do with offenders (regs->pc = fault address)
651+ *
652+ * returns 1 when task should be killed
653+ * 2 when patched PLT trampoline was detected
654+ * 3 when unpatched PLT trampoline was detected
655+ */
656+static int pax_handle_fetch_fault(struct pt_regs *regs)
657+{
658+
659+#ifdef CONFIG_PAX_EMUPLT
660+ int err;
661+
662+ do { /* PaX: patched PLT emulation #1 */
663+ unsigned int ldah, ldq, jmp;
664+
665+ err = get_user(ldah, (unsigned int *)regs->pc);
666+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
667+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
668+
669+ if (err)
670+ break;
671+
672+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
673+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
674+ jmp == 0x6BFB0000U)
675+ {
676+ unsigned long r27, addr;
677+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
678+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
679+
680+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
681+ err = get_user(r27, (unsigned long *)addr);
682+ if (err)
683+ break;
684+
685+ regs->r27 = r27;
686+ regs->pc = r27;
687+ return 2;
688+ }
689+ } while (0);
690+
691+ do { /* PaX: patched PLT emulation #2 */
692+ unsigned int ldah, lda, br;
693+
694+ err = get_user(ldah, (unsigned int *)regs->pc);
695+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
696+ err |= get_user(br, (unsigned int *)(regs->pc+8));
697+
698+ if (err)
699+ break;
700+
701+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
702+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
703+ (br & 0xFFE00000U) == 0xC3E00000U)
704+ {
705+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
706+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
707+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
708+
709+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
710+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
711+ return 2;
712+ }
713+ } while (0);
714+
715+ do { /* PaX: unpatched PLT emulation */
716+ unsigned int br;
717+
718+ err = get_user(br, (unsigned int *)regs->pc);
719+
720+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
721+ unsigned int br2, ldq, nop, jmp;
722+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
723+
724+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
725+ err = get_user(br2, (unsigned int *)addr);
726+ err |= get_user(ldq, (unsigned int *)(addr+4));
727+ err |= get_user(nop, (unsigned int *)(addr+8));
728+ err |= get_user(jmp, (unsigned int *)(addr+12));
729+ err |= get_user(resolver, (unsigned long *)(addr+16));
730+
731+ if (err)
732+ break;
733+
734+ if (br2 == 0xC3600000U &&
735+ ldq == 0xA77B000CU &&
736+ nop == 0x47FF041FU &&
737+ jmp == 0x6B7B0000U)
738+ {
739+ regs->r28 = regs->pc+4;
740+ regs->r27 = addr+16;
741+ regs->pc = resolver;
742+ return 3;
743+ }
744+ }
745+ } while (0);
746+#endif
747+
748+ return 1;
749+}
750+
751+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
752+{
753+ unsigned long i;
754+
755+ printk(KERN_ERR "PAX: bytes at PC: ");
756+ for (i = 0; i < 5; i++) {
757+ unsigned int c;
758+ if (get_user(c, (unsigned int *)pc+i))
759+ printk(KERN_CONT "???????? ");
760+ else
761+ printk(KERN_CONT "%08x ", c);
762+ }
763+ printk("\n");
764+}
765+#endif
766
767 /*
768 * This routine handles page faults. It determines the address,
769@@ -133,8 +251,29 @@ retry:
770 good_area:
771 si_code = SEGV_ACCERR;
772 if (cause < 0) {
773- if (!(vma->vm_flags & VM_EXEC))
774+ if (!(vma->vm_flags & VM_EXEC)) {
775+
776+#ifdef CONFIG_PAX_PAGEEXEC
777+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
778+ goto bad_area;
779+
780+ up_read(&mm->mmap_sem);
781+ switch (pax_handle_fetch_fault(regs)) {
782+
783+#ifdef CONFIG_PAX_EMUPLT
784+ case 2:
785+ case 3:
786+ return;
787+#endif
788+
789+ }
790+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
791+ do_group_exit(SIGKILL);
792+#else
793 goto bad_area;
794+#endif
795+
796+ }
797 } else if (!cause) {
798 /* Allow reads even for write-only mappings */
799 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
800diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
801index 67874b8..0e40765 100644
802--- a/arch/arm/Kconfig
803+++ b/arch/arm/Kconfig
804@@ -1813,7 +1813,7 @@ config ALIGNMENT_TRAP
805
806 config UACCESS_WITH_MEMCPY
807 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
808- depends on MMU
809+ depends on MMU && !PAX_MEMORY_UDEREF
810 default y if CPU_FEROCEON
811 help
812 Implement faster copy_to_user and clear_user methods for CPU
813diff --git a/arch/arm/common/gic.c b/arch/arm/common/gic.c
814index 87dfa902..3a523fc 100644
815--- a/arch/arm/common/gic.c
816+++ b/arch/arm/common/gic.c
817@@ -81,7 +81,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
818 * Supported arch specific GIC irq extension.
819 * Default make them NULL.
820 */
821-struct irq_chip gic_arch_extn = {
822+irq_chip_no_const gic_arch_extn __read_only = {
823 .irq_eoi = NULL,
824 .irq_mask = NULL,
825 .irq_unmask = NULL,
826@@ -329,7 +329,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
827 chained_irq_exit(chip, desc);
828 }
829
830-static struct irq_chip gic_chip = {
831+static irq_chip_no_const gic_chip __read_only = {
832 .name = "GIC",
833 .irq_mask = gic_mask_irq,
834 .irq_unmask = gic_unmask_irq,
835diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
836index c79f61f..9ac0642 100644
837--- a/arch/arm/include/asm/atomic.h
838+++ b/arch/arm/include/asm/atomic.h
839@@ -17,17 +17,35 @@
840 #include <asm/barrier.h>
841 #include <asm/cmpxchg.h>
842
843+#ifdef CONFIG_GENERIC_ATOMIC64
844+#include <asm-generic/atomic64.h>
845+#endif
846+
847 #define ATOMIC_INIT(i) { (i) }
848
849 #ifdef __KERNEL__
850
851+#define _ASM_EXTABLE(from, to) \
852+" .pushsection __ex_table,\"a\"\n"\
853+" .align 3\n" \
854+" .long " #from ", " #to"\n" \
855+" .popsection"
856+
857 /*
858 * On ARM, ordinary assignment (str instruction) doesn't clear the local
859 * strex/ldrex monitor on some implementations. The reason we can use it for
860 * atomic_set() is the clrex or dummy strex done on every exception return.
861 */
862 #define atomic_read(v) (*(volatile int *)&(v)->counter)
863+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
864+{
865+ return v->counter;
866+}
867 #define atomic_set(v,i) (((v)->counter) = (i))
868+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
869+{
870+ v->counter = i;
871+}
872
873 #if __LINUX_ARM_ARCH__ >= 6
874
875@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
876 int result;
877
878 __asm__ __volatile__("@ atomic_add\n"
879+"1: ldrex %1, [%3]\n"
880+" adds %0, %1, %4\n"
881+
882+#ifdef CONFIG_PAX_REFCOUNT
883+" bvc 3f\n"
884+"2: bkpt 0xf103\n"
885+"3:\n"
886+#endif
887+
888+" strex %1, %0, [%3]\n"
889+" teq %1, #0\n"
890+" bne 1b"
891+
892+#ifdef CONFIG_PAX_REFCOUNT
893+"\n4:\n"
894+ _ASM_EXTABLE(2b, 4b)
895+#endif
896+
897+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
898+ : "r" (&v->counter), "Ir" (i)
899+ : "cc");
900+}
901+
902+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
903+{
904+ unsigned long tmp;
905+ int result;
906+
907+ __asm__ __volatile__("@ atomic_add_unchecked\n"
908 "1: ldrex %0, [%3]\n"
909 " add %0, %0, %4\n"
910 " strex %1, %0, [%3]\n"
911@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
912 smp_mb();
913
914 __asm__ __volatile__("@ atomic_add_return\n"
915+"1: ldrex %1, [%3]\n"
916+" adds %0, %1, %4\n"
917+
918+#ifdef CONFIG_PAX_REFCOUNT
919+" bvc 3f\n"
920+" mov %0, %1\n"
921+"2: bkpt 0xf103\n"
922+"3:\n"
923+#endif
924+
925+" strex %1, %0, [%3]\n"
926+" teq %1, #0\n"
927+" bne 1b"
928+
929+#ifdef CONFIG_PAX_REFCOUNT
930+"\n4:\n"
931+ _ASM_EXTABLE(2b, 4b)
932+#endif
933+
934+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
935+ : "r" (&v->counter), "Ir" (i)
936+ : "cc");
937+
938+ smp_mb();
939+
940+ return result;
941+}
942+
943+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
944+{
945+ unsigned long tmp;
946+ int result;
947+
948+ smp_mb();
949+
950+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
951 "1: ldrex %0, [%3]\n"
952 " add %0, %0, %4\n"
953 " strex %1, %0, [%3]\n"
954@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
955 int result;
956
957 __asm__ __volatile__("@ atomic_sub\n"
958+"1: ldrex %1, [%3]\n"
959+" subs %0, %1, %4\n"
960+
961+#ifdef CONFIG_PAX_REFCOUNT
962+" bvc 3f\n"
963+"2: bkpt 0xf103\n"
964+"3:\n"
965+#endif
966+
967+" strex %1, %0, [%3]\n"
968+" teq %1, #0\n"
969+" bne 1b"
970+
971+#ifdef CONFIG_PAX_REFCOUNT
972+"\n4:\n"
973+ _ASM_EXTABLE(2b, 4b)
974+#endif
975+
976+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
977+ : "r" (&v->counter), "Ir" (i)
978+ : "cc");
979+}
980+
981+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
982+{
983+ unsigned long tmp;
984+ int result;
985+
986+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
987 "1: ldrex %0, [%3]\n"
988 " sub %0, %0, %4\n"
989 " strex %1, %0, [%3]\n"
990@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
991 smp_mb();
992
993 __asm__ __volatile__("@ atomic_sub_return\n"
994-"1: ldrex %0, [%3]\n"
995-" sub %0, %0, %4\n"
996+"1: ldrex %1, [%3]\n"
997+" subs %0, %1, %4\n"
998+
999+#ifdef CONFIG_PAX_REFCOUNT
1000+" bvc 3f\n"
1001+" mov %0, %1\n"
1002+"2: bkpt 0xf103\n"
1003+"3:\n"
1004+#endif
1005+
1006 " strex %1, %0, [%3]\n"
1007 " teq %1, #0\n"
1008 " bne 1b"
1009+
1010+#ifdef CONFIG_PAX_REFCOUNT
1011+"\n4:\n"
1012+ _ASM_EXTABLE(2b, 4b)
1013+#endif
1014+
1015 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1016 : "r" (&v->counter), "Ir" (i)
1017 : "cc");
1018@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1019 return oldval;
1020 }
1021
1022+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1023+{
1024+ unsigned long oldval, res;
1025+
1026+ smp_mb();
1027+
1028+ do {
1029+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1030+ "ldrex %1, [%3]\n"
1031+ "mov %0, #0\n"
1032+ "teq %1, %4\n"
1033+ "strexeq %0, %5, [%3]\n"
1034+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1035+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1036+ : "cc");
1037+ } while (res);
1038+
1039+ smp_mb();
1040+
1041+ return oldval;
1042+}
1043+
1044 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1045 {
1046 unsigned long tmp, tmp2;
1047@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1048
1049 return val;
1050 }
1051+
1052+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1053+{
1054+ return atomic_add_return(i, v);
1055+}
1056+
1057 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1058+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1059+{
1060+ (void) atomic_add_return(i, v);
1061+}
1062
1063 static inline int atomic_sub_return(int i, atomic_t *v)
1064 {
1065@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1066 return val;
1067 }
1068 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1069+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1070+{
1071+ (void) atomic_sub_return(i, v);
1072+}
1073
1074 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1075 {
1076@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1077 return ret;
1078 }
1079
1080+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1081+{
1082+ return atomic_cmpxchg(v, old, new);
1083+}
1084+
1085 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1086 {
1087 unsigned long flags;
1088@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1089 #endif /* __LINUX_ARM_ARCH__ */
1090
1091 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1092+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1093+{
1094+ return xchg(&v->counter, new);
1095+}
1096
1097 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1098 {
1099@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1100 }
1101
1102 #define atomic_inc(v) atomic_add(1, v)
1103+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1104+{
1105+ atomic_add_unchecked(1, v);
1106+}
1107 #define atomic_dec(v) atomic_sub(1, v)
1108+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1109+{
1110+ atomic_sub_unchecked(1, v);
1111+}
1112
1113 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1114+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1115+{
1116+ return atomic_add_return_unchecked(1, v) == 0;
1117+}
1118 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1119 #define atomic_inc_return(v) (atomic_add_return(1, v))
1120+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1121+{
1122+ return atomic_add_return_unchecked(1, v);
1123+}
1124 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1125 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1126
1127@@ -241,6 +428,14 @@ typedef struct {
1128 u64 __aligned(8) counter;
1129 } atomic64_t;
1130
1131+#ifdef CONFIG_PAX_REFCOUNT
1132+typedef struct {
1133+ u64 __aligned(8) counter;
1134+} atomic64_unchecked_t;
1135+#else
1136+typedef atomic64_t atomic64_unchecked_t;
1137+#endif
1138+
1139 #define ATOMIC64_INIT(i) { (i) }
1140
1141 static inline u64 atomic64_read(const atomic64_t *v)
1142@@ -256,6 +451,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1143 return result;
1144 }
1145
1146+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1147+{
1148+ u64 result;
1149+
1150+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1151+" ldrexd %0, %H0, [%1]"
1152+ : "=&r" (result)
1153+ : "r" (&v->counter), "Qo" (v->counter)
1154+ );
1155+
1156+ return result;
1157+}
1158+
1159 static inline void atomic64_set(atomic64_t *v, u64 i)
1160 {
1161 u64 tmp;
1162@@ -270,6 +478,20 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1163 : "cc");
1164 }
1165
1166+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1167+{
1168+ u64 tmp;
1169+
1170+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1171+"1: ldrexd %0, %H0, [%2]\n"
1172+" strexd %0, %3, %H3, [%2]\n"
1173+" teq %0, #0\n"
1174+" bne 1b"
1175+ : "=&r" (tmp), "=Qo" (v->counter)
1176+ : "r" (&v->counter), "r" (i)
1177+ : "cc");
1178+}
1179+
1180 static inline void atomic64_add(u64 i, atomic64_t *v)
1181 {
1182 u64 result;
1183@@ -278,6 +500,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1184 __asm__ __volatile__("@ atomic64_add\n"
1185 "1: ldrexd %0, %H0, [%3]\n"
1186 " adds %0, %0, %4\n"
1187+" adcs %H0, %H0, %H4\n"
1188+
1189+#ifdef CONFIG_PAX_REFCOUNT
1190+" bvc 3f\n"
1191+"2: bkpt 0xf103\n"
1192+"3:\n"
1193+#endif
1194+
1195+" strexd %1, %0, %H0, [%3]\n"
1196+" teq %1, #0\n"
1197+" bne 1b"
1198+
1199+#ifdef CONFIG_PAX_REFCOUNT
1200+"\n4:\n"
1201+ _ASM_EXTABLE(2b, 4b)
1202+#endif
1203+
1204+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1205+ : "r" (&v->counter), "r" (i)
1206+ : "cc");
1207+}
1208+
1209+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1210+{
1211+ u64 result;
1212+ unsigned long tmp;
1213+
1214+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1215+"1: ldrexd %0, %H0, [%3]\n"
1216+" adds %0, %0, %4\n"
1217 " adc %H0, %H0, %H4\n"
1218 " strexd %1, %0, %H0, [%3]\n"
1219 " teq %1, #0\n"
1220@@ -289,12 +541,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1221
1222 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1223 {
1224- u64 result;
1225- unsigned long tmp;
1226+ u64 result, tmp;
1227
1228 smp_mb();
1229
1230 __asm__ __volatile__("@ atomic64_add_return\n"
1231+"1: ldrexd %1, %H1, [%3]\n"
1232+" adds %0, %1, %4\n"
1233+" adcs %H0, %H1, %H4\n"
1234+
1235+#ifdef CONFIG_PAX_REFCOUNT
1236+" bvc 3f\n"
1237+" mov %0, %1\n"
1238+" mov %H0, %H1\n"
1239+"2: bkpt 0xf103\n"
1240+"3:\n"
1241+#endif
1242+
1243+" strexd %1, %0, %H0, [%3]\n"
1244+" teq %1, #0\n"
1245+" bne 1b"
1246+
1247+#ifdef CONFIG_PAX_REFCOUNT
1248+"\n4:\n"
1249+ _ASM_EXTABLE(2b, 4b)
1250+#endif
1251+
1252+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1253+ : "r" (&v->counter), "r" (i)
1254+ : "cc");
1255+
1256+ smp_mb();
1257+
1258+ return result;
1259+}
1260+
1261+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1262+{
1263+ u64 result;
1264+ unsigned long tmp;
1265+
1266+ smp_mb();
1267+
1268+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1269 "1: ldrexd %0, %H0, [%3]\n"
1270 " adds %0, %0, %4\n"
1271 " adc %H0, %H0, %H4\n"
1272@@ -318,6 +607,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1273 __asm__ __volatile__("@ atomic64_sub\n"
1274 "1: ldrexd %0, %H0, [%3]\n"
1275 " subs %0, %0, %4\n"
1276+" sbcs %H0, %H0, %H4\n"
1277+
1278+#ifdef CONFIG_PAX_REFCOUNT
1279+" bvc 3f\n"
1280+"2: bkpt 0xf103\n"
1281+"3:\n"
1282+#endif
1283+
1284+" strexd %1, %0, %H0, [%3]\n"
1285+" teq %1, #0\n"
1286+" bne 1b"
1287+
1288+#ifdef CONFIG_PAX_REFCOUNT
1289+"\n4:\n"
1290+ _ASM_EXTABLE(2b, 4b)
1291+#endif
1292+
1293+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1294+ : "r" (&v->counter), "r" (i)
1295+ : "cc");
1296+}
1297+
1298+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1299+{
1300+ u64 result;
1301+ unsigned long tmp;
1302+
1303+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1304+"1: ldrexd %0, %H0, [%3]\n"
1305+" subs %0, %0, %4\n"
1306 " sbc %H0, %H0, %H4\n"
1307 " strexd %1, %0, %H0, [%3]\n"
1308 " teq %1, #0\n"
1309@@ -329,18 +648,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1310
1311 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1312 {
1313- u64 result;
1314- unsigned long tmp;
1315+ u64 result, tmp;
1316
1317 smp_mb();
1318
1319 __asm__ __volatile__("@ atomic64_sub_return\n"
1320-"1: ldrexd %0, %H0, [%3]\n"
1321-" subs %0, %0, %4\n"
1322-" sbc %H0, %H0, %H4\n"
1323+"1: ldrexd %1, %H1, [%3]\n"
1324+" subs %0, %1, %4\n"
1325+" sbcs %H0, %H1, %H4\n"
1326+
1327+#ifdef CONFIG_PAX_REFCOUNT
1328+" bvc 3f\n"
1329+" mov %0, %1\n"
1330+" mov %H0, %H1\n"
1331+"2: bkpt 0xf103\n"
1332+"3:\n"
1333+#endif
1334+
1335 " strexd %1, %0, %H0, [%3]\n"
1336 " teq %1, #0\n"
1337 " bne 1b"
1338+
1339+#ifdef CONFIG_PAX_REFCOUNT
1340+"\n4:\n"
1341+ _ASM_EXTABLE(2b, 4b)
1342+#endif
1343+
1344 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1345 : "r" (&v->counter), "r" (i)
1346 : "cc");
1347@@ -374,6 +707,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1348 return oldval;
1349 }
1350
1351+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1352+{
1353+ u64 oldval;
1354+ unsigned long res;
1355+
1356+ smp_mb();
1357+
1358+ do {
1359+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1360+ "ldrexd %1, %H1, [%3]\n"
1361+ "mov %0, #0\n"
1362+ "teq %1, %4\n"
1363+ "teqeq %H1, %H4\n"
1364+ "strexdeq %0, %5, %H5, [%3]"
1365+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1366+ : "r" (&ptr->counter), "r" (old), "r" (new)
1367+ : "cc");
1368+ } while (res);
1369+
1370+ smp_mb();
1371+
1372+ return oldval;
1373+}
1374+
1375 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1376 {
1377 u64 result;
1378@@ -397,21 +754,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1379
1380 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1381 {
1382- u64 result;
1383- unsigned long tmp;
1384+ u64 result, tmp;
1385
1386 smp_mb();
1387
1388 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1389-"1: ldrexd %0, %H0, [%3]\n"
1390-" subs %0, %0, #1\n"
1391-" sbc %H0, %H0, #0\n"
1392+"1: ldrexd %1, %H1, [%3]\n"
1393+" subs %0, %1, #1\n"
1394+" sbcs %H0, %H1, #0\n"
1395+
1396+#ifdef CONFIG_PAX_REFCOUNT
1397+" bvc 3f\n"
1398+" mov %0, %1\n"
1399+" mov %H0, %H1\n"
1400+"2: bkpt 0xf103\n"
1401+"3:\n"
1402+#endif
1403+
1404 " teq %H0, #0\n"
1405-" bmi 2f\n"
1406+" bmi 4f\n"
1407 " strexd %1, %0, %H0, [%3]\n"
1408 " teq %1, #0\n"
1409 " bne 1b\n"
1410-"2:"
1411+"4:\n"
1412+
1413+#ifdef CONFIG_PAX_REFCOUNT
1414+ _ASM_EXTABLE(2b, 4b)
1415+#endif
1416+
1417 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1418 : "r" (&v->counter)
1419 : "cc");
1420@@ -434,13 +804,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1421 " teq %0, %5\n"
1422 " teqeq %H0, %H5\n"
1423 " moveq %1, #0\n"
1424-" beq 2f\n"
1425+" beq 4f\n"
1426 " adds %0, %0, %6\n"
1427-" adc %H0, %H0, %H6\n"
1428+" adcs %H0, %H0, %H6\n"
1429+
1430+#ifdef CONFIG_PAX_REFCOUNT
1431+" bvc 3f\n"
1432+"2: bkpt 0xf103\n"
1433+"3:\n"
1434+#endif
1435+
1436 " strexd %2, %0, %H0, [%4]\n"
1437 " teq %2, #0\n"
1438 " bne 1b\n"
1439-"2:"
1440+"4:\n"
1441+
1442+#ifdef CONFIG_PAX_REFCOUNT
1443+ _ASM_EXTABLE(2b, 4b)
1444+#endif
1445+
1446 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1447 : "r" (&v->counter), "r" (u), "r" (a)
1448 : "cc");
1449@@ -453,10 +835,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1450
1451 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1452 #define atomic64_inc(v) atomic64_add(1LL, (v))
1453+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1454 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1455+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1456 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1457 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1458 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1459+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1460 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1461 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1462 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
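
The PAX_REFCOUNT additions above switch add/adc to the flag-setting adds/adcs forms and branch with bvc (branch if overflow clear) past a bkpt instruction, so a signed overflow of a reference count traps instead of silently wrapping. In portable C, the invariant being enforced is roughly the following (illustrative; the patch does this inside the ldrex/strex loop):

    /* Portable restatement of the adds/bvc/bkpt sequence. */
    static int checked_add(int counter, int i)
    {
        int result;
        /* GCC/Clang builtin: returns true if signed overflow occurred. */
        if (__builtin_add_overflow(counter, i, &result))
            __builtin_trap();   /* analogous to the bkpt 0xf103 */
        return result;
    }
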
1463diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1464index 75fe66b..ba3dee4 100644
1465--- a/arch/arm/include/asm/cache.h
1466+++ b/arch/arm/include/asm/cache.h
1467@@ -4,8 +4,10 @@
1468 #ifndef __ASMARM_CACHE_H
1469 #define __ASMARM_CACHE_H
1470
1471+#include <linux/const.h>
1472+
1473 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1474-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1475+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1476
1477 /*
1478 * Memory returned by kmalloc() may be used for DMA, so we must make
1479@@ -24,5 +26,6 @@
1480 #endif
1481
1482 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1483+#define __read_only __attribute__ ((__section__(".data..read_only")))
1484
1485 #endif
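
__read_only places a variable in the .data..read_only section, which KERNEXEC maps without write permission once the kernel is up; legitimate writers go through the pax_open_kernel()/pax_close_kernel() pair used elsewhere in this patch (see the fncpy.h hunk below). An illustrative use, not taken from the patch:

    /* Data written once during early setup and never changed again. */
    static unsigned long boot_params_addr __read_only;

    static void __init record_boot_params(unsigned long addr)
    {
        pax_open_kernel();      /* temporarily lift write protection */
        boot_params_addr = addr;
        pax_close_kernel();
    }
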
1486diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1487index e1489c5..d418304 100644
1488--- a/arch/arm/include/asm/cacheflush.h
1489+++ b/arch/arm/include/asm/cacheflush.h
1490@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1491 void (*dma_unmap_area)(const void *, size_t, int);
1492
1493 void (*dma_flush_range)(const void *, const void *);
1494-};
1495+} __no_const;
1496
1497 /*
1498 * Select the calling method
1499diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1500index 6dcc164..b14d917 100644
1501--- a/arch/arm/include/asm/checksum.h
1502+++ b/arch/arm/include/asm/checksum.h
1503@@ -37,7 +37,19 @@ __wsum
1504 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1505
1506 __wsum
1507-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1508+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1509+
1510+static inline __wsum
1511+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1512+{
1513+ __wsum ret;
1514+ pax_open_userland();
1515+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1516+ pax_close_userland();
1517+ return ret;
1518+}
1519+
1520+
1521
1522 /*
1523 * Fold a partial checksum without adding pseudo headers
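
The wrapper pattern above — open userland access, perform the access, close it again — is how UDEREF lets whitelisted code reach user memory; the same shape recurs in the futex.h changes below. As a generic sketch (a hypothetical accessor, not one added by the patch):

    /* Generic shape of a UDEREF-aware user accessor (illustrative). */
    static inline long read_user_word(const unsigned long __user *uaddr,
                                      unsigned long *val)
    {
        long err;

        pax_open_userland();            /* grant temporary user access */
        err = __get_user(*val, uaddr);  /* the actual access */
        pax_close_userland();           /* revoke it again */
        return err;
    }
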
1524diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1525index 7eb18c1..e38b6d2 100644
1526--- a/arch/arm/include/asm/cmpxchg.h
1527+++ b/arch/arm/include/asm/cmpxchg.h
1528@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1529
1530 #define xchg(ptr,x) \
1531 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1532+#define xchg_unchecked(ptr,x) \
1533+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1534
1535 #include <asm-generic/cmpxchg-local.h>
1536
1537diff --git a/arch/arm/include/asm/delay.h b/arch/arm/include/asm/delay.h
1538index ab98fdd..6b19938 100644
1539--- a/arch/arm/include/asm/delay.h
1540+++ b/arch/arm/include/asm/delay.h
1541@@ -24,9 +24,9 @@ extern struct arm_delay_ops {
1542 void (*delay)(unsigned long);
1543 void (*const_udelay)(unsigned long);
1544 void (*udelay)(unsigned long);
1545-} arm_delay_ops;
1546+} *arm_delay_ops;
1547
1548-#define __delay(n) arm_delay_ops.delay(n)
1549+#define __delay(n) arm_delay_ops->delay(n)
1550
1551 /*
1552 * This function intentionally does not exist; if you see references to
1553@@ -47,8 +47,8 @@ extern void __bad_udelay(void);
1554 * first constant multiplications gets optimized away if the delay is
1555 * a constant)
1556 */
1557-#define __udelay(n) arm_delay_ops.udelay(n)
1558-#define __const_udelay(n) arm_delay_ops.const_udelay(n)
1559+#define __udelay(n) arm_delay_ops->udelay(n)
1560+#define __const_udelay(n) arm_delay_ops->const_udelay(n)
1561
1562 #define udelay(n) \
1563 (__builtin_constant_p(n) ? \
1564diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1565index 6ddbe44..b5e38b1 100644
1566--- a/arch/arm/include/asm/domain.h
1567+++ b/arch/arm/include/asm/domain.h
1568@@ -48,18 +48,37 @@
1569 * Domain types
1570 */
1571 #define DOMAIN_NOACCESS 0
1572-#define DOMAIN_CLIENT 1
1573 #ifdef CONFIG_CPU_USE_DOMAINS
1574+#define DOMAIN_USERCLIENT 1
1575+#define DOMAIN_KERNELCLIENT 1
1576 #define DOMAIN_MANAGER 3
1577+#define DOMAIN_VECTORS DOMAIN_USER
1578 #else
1579+
1580+#ifdef CONFIG_PAX_KERNEXEC
1581 #define DOMAIN_MANAGER 1
1582+#define DOMAIN_KERNEXEC 3
1583+#else
1584+#define DOMAIN_MANAGER 1
1585+#endif
1586+
1587+#ifdef CONFIG_PAX_MEMORY_UDEREF
1588+#define DOMAIN_USERCLIENT 0
1589+#define DOMAIN_UDEREF 1
1590+#define DOMAIN_VECTORS DOMAIN_KERNEL
1591+#else
1592+#define DOMAIN_USERCLIENT 1
1593+#define DOMAIN_VECTORS DOMAIN_USER
1594+#endif
1595+#define DOMAIN_KERNELCLIENT 1
1596+
1597 #endif
1598
1599 #define domain_val(dom,type) ((type) << (2*(dom)))
1600
1601 #ifndef __ASSEMBLY__
1602
1603-#ifdef CONFIG_CPU_USE_DOMAINS
1604+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1605 static inline void set_domain(unsigned val)
1606 {
1607 asm volatile(
1608@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1609 isb();
1610 }
1611
1612-#define modify_domain(dom,type) \
1613- do { \
1614- struct thread_info *thread = current_thread_info(); \
1615- unsigned int domain = thread->cpu_domain; \
1616- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1617- thread->cpu_domain = domain | domain_val(dom, type); \
1618- set_domain(thread->cpu_domain); \
1619- } while (0)
1620-
1621+extern void modify_domain(unsigned int dom, unsigned int type);
1622 #else
1623 static inline void set_domain(unsigned val) { }
1624 static inline void modify_domain(unsigned dom, unsigned type) { }
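
With domain_val(dom,type) == type << (2*dom), a DACR image is just the OR of two-bit fields, one per domain. Under UDEREF, user pages land in a domain whose default is no access (DOMAIN_USERCLIENT == 0) and is flipped to client access only while userland is "open". An illustrative composition using the macros above (the actual per-task value is managed by set_domain()/modify_domain()):

    unsigned int dacr =
        domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) |
        domain_val(DOMAIN_USER,   DOMAIN_USERCLIENT)   |
        domain_val(DOMAIN_IO,     DOMAIN_KERNELCLIENT);
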
1625diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1626index 38050b1..9d90e8b 100644
1627--- a/arch/arm/include/asm/elf.h
1628+++ b/arch/arm/include/asm/elf.h
1629@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1630 the loader. We need to make sure that it is out of the way of the program
1631 that it will "exec", and that there is sufficient room for the brk. */
1632
1633-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1634+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1635+
1636+#ifdef CONFIG_PAX_ASLR
1637+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1638+
1639+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1640+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1641+#endif
1642
1643 /* When the program starts, a1 contains a pointer to a function to be
1644 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1645@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1646 extern void elf_set_personality(const struct elf32_hdr *);
1647 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1648
1649-struct mm_struct;
1650-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1651-#define arch_randomize_brk arch_randomize_brk
1652-
1653 #endif
1654diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1655index de53547..52b9a28 100644
1656--- a/arch/arm/include/asm/fncpy.h
1657+++ b/arch/arm/include/asm/fncpy.h
1658@@ -81,7 +81,9 @@
1659 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1660 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1661 \
1662+ pax_open_kernel(); \
1663 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1664+ pax_close_kernel(); \
1665 flush_icache_range((unsigned long)(dest_buf), \
1666 (unsigned long)(dest_buf) + (size)); \
1667 \
1668diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1669index e42cf59..7b94b8f 100644
1670--- a/arch/arm/include/asm/futex.h
1671+++ b/arch/arm/include/asm/futex.h
1672@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1673 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1674 return -EFAULT;
1675
1676+ pax_open_userland();
1677+
1678 smp_mb();
1679 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1680 "1: ldrex %1, [%4]\n"
1681@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1682 : "cc", "memory");
1683 smp_mb();
1684
1685+ pax_close_userland();
1686+
1687 *uval = val;
1688 return ret;
1689 }
1690@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1691 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1692 return -EFAULT;
1693
1694+ pax_open_userland();
1695+
1696 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1697 "1: " TUSER(ldr) " %1, [%4]\n"
1698 " teq %1, %2\n"
1699@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1700 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1701 : "cc", "memory");
1702
1703+ pax_close_userland();
1704+
1705 *uval = val;
1706 return ret;
1707 }
1708@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1709 return -EFAULT;
1710
1711 pagefault_disable(); /* implies preempt_disable() */
1712+ pax_open_userland();
1713
1714 switch (op) {
1715 case FUTEX_OP_SET:
1716@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1717 ret = -ENOSYS;
1718 }
1719
1720+ pax_close_userland();
1721 pagefault_enable(); /* subsumes preempt_enable() */
1722
1723 if (!ret) {
1724diff --git a/arch/arm/include/asm/hardware/gic.h b/arch/arm/include/asm/hardware/gic.h
1725index 4b1ce6c..bea3f73 100644
1726--- a/arch/arm/include/asm/hardware/gic.h
1727+++ b/arch/arm/include/asm/hardware/gic.h
1728@@ -34,9 +34,10 @@
1729
1730 #ifndef __ASSEMBLY__
1731 #include <linux/irqdomain.h>
1732+#include <linux/irq.h>
1733 struct device_node;
1734
1735-extern struct irq_chip gic_arch_extn;
1736+extern irq_chip_no_const gic_arch_extn;
1737
1738 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
1739 u32 offset, struct device_node *);
1740diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1741index 83eb2f7..ed77159 100644
1742--- a/arch/arm/include/asm/kmap_types.h
1743+++ b/arch/arm/include/asm/kmap_types.h
1744@@ -4,6 +4,6 @@
1745 /*
1746 * This is the "bare minimum". AIO seems to require this.
1747 */
1748-#define KM_TYPE_NR 16
1749+#define KM_TYPE_NR 17
1750
1751 #endif
1752diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1753index 9e614a1..3302cca 100644
1754--- a/arch/arm/include/asm/mach/dma.h
1755+++ b/arch/arm/include/asm/mach/dma.h
1756@@ -22,7 +22,7 @@ struct dma_ops {
1757 int (*residue)(unsigned int, dma_t *); /* optional */
1758 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1759 const char *type;
1760-};
1761+} __do_const;
1762
1763 struct dma_struct {
1764 void *addr; /* single DMA address */
1765diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1766index 2fe141f..192dc01 100644
1767--- a/arch/arm/include/asm/mach/map.h
1768+++ b/arch/arm/include/asm/mach/map.h
1769@@ -27,13 +27,16 @@ struct map_desc {
1770 #define MT_MINICLEAN 6
1771 #define MT_LOW_VECTORS 7
1772 #define MT_HIGH_VECTORS 8
1773-#define MT_MEMORY 9
1774+#define MT_MEMORY_RWX 9
1775 #define MT_ROM 10
1776-#define MT_MEMORY_NONCACHED 11
1777+#define MT_MEMORY_NONCACHED_RX 11
1778 #define MT_MEMORY_DTCM 12
1779 #define MT_MEMORY_ITCM 13
1780 #define MT_MEMORY_SO 14
1781 #define MT_MEMORY_DMA_READY 15
1782+#define MT_MEMORY_RW 16
1783+#define MT_MEMORY_RX 17
1784+#define MT_MEMORY_NONCACHED_RW 18
1785
1786 #ifdef CONFIG_MMU
1787 extern void iotable_init(struct map_desc *, int);
1788diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1789index 53426c6..c7baff3 100644
1790--- a/arch/arm/include/asm/outercache.h
1791+++ b/arch/arm/include/asm/outercache.h
1792@@ -35,7 +35,7 @@ struct outer_cache_fns {
1793 #endif
1794 void (*set_debug)(unsigned long);
1795 void (*resume)(void);
1796-};
1797+} __no_const;
1798
1799 #ifdef CONFIG_OUTER_CACHE
1800
1801diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1802index 812a494..71fc0b6 100644
1803--- a/arch/arm/include/asm/page.h
1804+++ b/arch/arm/include/asm/page.h
1805@@ -114,7 +114,7 @@ struct cpu_user_fns {
1806 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1807 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1808 unsigned long vaddr, struct vm_area_struct *vma);
1809-};
1810+} __no_const;
1811
1812 #ifdef MULTI_USER
1813 extern struct cpu_user_fns cpu_user;
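
__no_const is the escape hatch for the constify gcc plugin: structures made up purely of function pointers are normally forced const at compile time, but ops structures that are legitimately assigned at runtime (cpu_user here, outer_cache above) must opt out. A sketch of the two cases, with hypothetical struct names:

    /* An ops struct that is only ever initialized statically may be
     * constified by the plugin ... */
    struct fixed_ops {
        void (*start)(void);
        void (*stop)(void);
    };

    /* ... whereas one filled in at runtime must carry __no_const so
     * the plugin leaves it writable. */
    struct runtime_ops {
        void (*start)(void);
        void (*stop)(void);
    } __no_const;
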
1814diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1815index 943504f..c37a730 100644
1816--- a/arch/arm/include/asm/pgalloc.h
1817+++ b/arch/arm/include/asm/pgalloc.h
1818@@ -17,6 +17,7 @@
1819 #include <asm/processor.h>
1820 #include <asm/cacheflush.h>
1821 #include <asm/tlbflush.h>
1822+#include <asm/system_info.h>
1823
1824 #define check_pgt_cache() do { } while (0)
1825
1826@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1827 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1828 }
1829
1830+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1831+{
1832+ pud_populate(mm, pud, pmd);
1833+}
1834+
1835 #else /* !CONFIG_ARM_LPAE */
1836
1837 /*
1838@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1839 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1840 #define pmd_free(mm, pmd) do { } while (0)
1841 #define pud_populate(mm,pmd,pte) BUG()
1842+#define pud_populate_kernel(mm,pmd,pte) BUG()
1843
1844 #endif /* CONFIG_ARM_LPAE */
1845
1846@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1847 __free_page(pte);
1848 }
1849
1850+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1851+{
1852+#ifdef CONFIG_ARM_LPAE
1853+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1854+#else
1855+ if (addr & SECTION_SIZE)
1856+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1857+ else
1858+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1859+#endif
1860+ flush_pmd_entry(pmdp);
1861+}
1862+
1863 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1864 pmdval_t prot)
1865 {
1866@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1867 static inline void
1868 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1869 {
1870- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1871+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1872 }
1873 #define pmd_pgtable(pmd) pmd_page(pmd)
1874
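
The `__section_update()` helper added above deserves a note: on non-LPAE ARM, Linux's pmd_t covers a pair of 1 MiB hardware section entries, so the helper uses `addr & SECTION_SIZE` to pick which of the two descriptors to modify before flushing. A minimal user-space model of that index selection, with made-up descriptor values:

```c
#include <stdio.h>
#include <stdint.h>

#define SECTION_SIZE (1UL << 20)   /* 1 MiB hardware sections on classic ARM */

/* Model of the non-LPAE branch of __section_update(): the low section
 * bit of the address selects which of the paired descriptors to OR
 * the protection bits into. */
static void section_update(uint32_t pmdp[2], unsigned long addr, uint32_t prot)
{
    if (addr & SECTION_SIZE)
        pmdp[1] |= prot;
    else
        pmdp[0] |= prot;
    /* the kernel version finishes with flush_pmd_entry(pmdp) here */
}

int main(void)
{
    uint32_t pmd[2] = { 0x00000c0e, 0x00100c0e };  /* fake section descriptors */
    section_update(pmd, 0xc0000000UL, 1u << 4);    /* even 1 MiB section: pmd[0] */
    section_update(pmd, 0xc0100000UL, 1u << 4);    /* odd 1 MiB section:  pmd[1] */
    printf("%08x %08x\n", pmd[0], pmd[1]);
    return 0;
}
```
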
1875diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1876index 5cfba15..f415e1a 100644
1877--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1878+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1879@@ -20,12 +20,15 @@
1880 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1881 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1882 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1883+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1884 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1885 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1886 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1887+
1888 /*
1889 * - section
1890 */
1891+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1892 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1893 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1894 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1895@@ -37,6 +40,7 @@
1896 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1897 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1898 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1899+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1900
1901 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1902 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1903@@ -66,6 +70,7 @@
1904 * - extended small page/tiny page
1905 */
1906 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1907+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1908 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1909 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1910 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1911diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1912index f97ee02..07f1be5 100644
1913--- a/arch/arm/include/asm/pgtable-2level.h
1914+++ b/arch/arm/include/asm/pgtable-2level.h
1915@@ -125,6 +125,7 @@
1916 #define L_PTE_XN (_AT(pteval_t, 1) << 9)
1917 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1918 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1919+#define L_PTE_PXN (_AT(pteval_t, 1) << 12) /* v7 */
1920
1921 /*
1922 * These are the memory types, defined to be compatible with
1923diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1924index d795282..a43ea90 100644
1925--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1926+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1927@@ -32,15 +32,18 @@
1928 #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0)
1929 #define PMD_BIT4 (_AT(pmdval_t, 0))
1930 #define PMD_DOMAIN(x) (_AT(pmdval_t, 0))
1931+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 59) /* PXNTable */
1932
1933 /*
1934 * - section
1935 */
1936 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1937 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1938+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
1939 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1940 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1941 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1942+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53)
1943 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 54)
1944 #define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0))
1945 #define PMD_SECT_AP_READ (_AT(pmdval_t, 0))
1946@@ -66,6 +69,7 @@
1947 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1948 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1949 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1950+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1951 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1952
1953 /*
1954diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1955index a3f3792..7b932a6 100644
1956--- a/arch/arm/include/asm/pgtable-3level.h
1957+++ b/arch/arm/include/asm/pgtable-3level.h
1958@@ -74,6 +74,7 @@
1959 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1960 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1961 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1962+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1963 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1964 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1965 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1966@@ -82,6 +83,7 @@
1967 /*
1968 * To be used in assembly code with the upper page attributes.
1969 */
1970+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1971 #define L_PTE_XN_HIGH (1 << (54 - 32))
1972 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1973
1974diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1975index 9c82f988..514705a 100644
1976--- a/arch/arm/include/asm/pgtable.h
1977+++ b/arch/arm/include/asm/pgtable.h
1978@@ -30,6 +30,9 @@
1979 #include <asm/pgtable-2level.h>
1980 #endif
1981
1982+#define ktla_ktva(addr) (addr)
1983+#define ktva_ktla(addr) (addr)
1984+
1985 /*
1986 * Just any arbitrary offset to the start of the vmalloc VM area: the
1987 * current 8MB value just means that there will be a 8MB "hole" after the
1988@@ -45,6 +48,9 @@
1989 #define LIBRARY_TEXT_START 0x0c000000
1990
1991 #ifndef __ASSEMBLY__
1992+extern pteval_t __supported_pte_mask;
1993+extern pmdval_t __supported_pmd_mask;
1994+
1995 extern void __pte_error(const char *file, int line, pte_t);
1996 extern void __pmd_error(const char *file, int line, pmd_t);
1997 extern void __pgd_error(const char *file, int line, pgd_t);
1998@@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1999 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
2000 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
2001
2002+#define __HAVE_ARCH_PAX_OPEN_KERNEL
2003+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
2004+
2005+#ifdef CONFIG_PAX_KERNEXEC
2006+#include <asm/domain.h>
2007+#include <linux/thread_info.h>
2008+#include <linux/preempt.h>
2009+#endif
2010+
2011+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2012+static inline int test_domain(int domain, int domaintype)
2013+{
2014+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
2015+}
2016+#endif
2017+
2018+#ifdef CONFIG_PAX_KERNEXEC
2019+static inline unsigned long pax_open_kernel(void) {
2020+#ifdef CONFIG_ARM_LPAE
2021+ /* TODO */
2022+#else
2023+ preempt_disable();
2024+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2025+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2026+#endif
2027+ return 0;
2028+}
2029+
2030+static inline unsigned long pax_close_kernel(void) {
2031+#ifdef CONFIG_ARM_LPAE
2032+ /* TODO */
2033+#else
2034+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2035+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2036+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2037+ preempt_enable_no_resched();
2038+#endif
2039+ return 0;
2040+}
2041+#else
2042+static inline unsigned long pax_open_kernel(void) { return 0; }
2043+static inline unsigned long pax_close_kernel(void) { return 0; }
2044+#endif
2045+
2046 /*
2047 * This is the lowest virtual address we can permit any user space
2048 * mapping to be mapped at. This is particularly important for
2049@@ -63,8 +113,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2050 /*
2051 * The pgprot_* and protection_map entries will be fixed up in runtime
2052 * to include the cachable and bufferable bits based on memory policy,
2053- * as well as any architecture dependent bits like global/ASID and SMP
2054- * shared mapping bits.
2055+ * as well as any architecture dependent bits like global/ASID, PXN,
2056+ * and SMP shared mapping bits.
2057 */
2058 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2059
2060@@ -240,7 +290,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2061
2062 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2063 {
2064- const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE;
2065+ const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER | L_PTE_NONE | __supported_pte_mask;
2066 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2067 return pte;
2068 }
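
The pax_open_kernel()/pax_close_kernel() pair defined in this hunk is the write-enable bracket used throughout the rest of the patch (fiq.c, patch.c, and traps.c below all adopt it): with preemption disabled, DOMAIN_KERNEL is switched to the KERNEXEC type so a store to otherwise read-only kernel memory is permitted, then switched back. A sketch of the call-site pattern; patch_word() itself is illustrative:

```c
/* Illustrative call site for the bracket above: the only work done
 * between open and close is the privileged store itself. */
static void patch_word(unsigned int *target, unsigned int val)
{
    pax_open_kernel();    /* preempt off, DOMAIN_KERNEL -> DOMAIN_KERNEXEC */
    *target = val;        /* write lands despite read-only page permissions */
    pax_close_kernel();   /* back to the client domain, preempt on */
}
```
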
2069diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2070index f3628fb..a0672dd 100644
2071--- a/arch/arm/include/asm/proc-fns.h
2072+++ b/arch/arm/include/asm/proc-fns.h
2073@@ -75,7 +75,7 @@ extern struct processor {
2074 unsigned int suspend_size;
2075 void (*do_suspend)(void *);
2076 void (*do_resume)(void *);
2077-} processor;
2078+} __do_const processor;
2079
2080 #ifndef MULTI_CPU
2081 extern void cpu_proc_init(void);
2082diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
2083index 06e7d50..8a8e251 100644
2084--- a/arch/arm/include/asm/processor.h
2085+++ b/arch/arm/include/asm/processor.h
2086@@ -65,9 +65,8 @@ struct thread_struct {
2087 regs->ARM_cpsr |= PSR_ENDSTATE; \
2088 regs->ARM_pc = pc & ~1; /* pc */ \
2089 regs->ARM_sp = sp; /* sp */ \
2090- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
2091- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
2092- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
2093+ /* r2 (envp), r1 (argv), r0 (argc) */ \
2094+ (void)copy_from_user(&regs->ARM_r0, (const char __user *)stack, 3 * sizeof(unsigned long)); \
2095 nommu_start_thread(regs); \
2096 })
2097
2098diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2099index d3a22be..3a69ad5 100644
2100--- a/arch/arm/include/asm/smp.h
2101+++ b/arch/arm/include/asm/smp.h
2102@@ -107,7 +107,7 @@ struct smp_operations {
2103 int (*cpu_disable)(unsigned int cpu);
2104 #endif
2105 #endif
2106-};
2107+} __no_const;
2108
2109 /*
2110 * set platform specific SMP operations
2111diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2112index cddda1f..ff357f7 100644
2113--- a/arch/arm/include/asm/thread_info.h
2114+++ b/arch/arm/include/asm/thread_info.h
2115@@ -77,9 +77,9 @@ struct thread_info {
2116 .flags = 0, \
2117 .preempt_count = INIT_PREEMPT_COUNT, \
2118 .addr_limit = KERNEL_DS, \
2119- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2120- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2121- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2122+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2123+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2124+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2125 .restart_block = { \
2126 .fn = do_no_restart_syscall, \
2127 }, \
2128@@ -152,6 +152,12 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2129 #define TIF_SYSCALL_AUDIT 9
2130 #define TIF_SYSCALL_TRACEPOINT 10
2131 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2132+
2133+/* within 8 bits of TIF_SYSCALL_TRACE
2134+ * to meet flexible second operand requirements
2135+ */
2136+#define TIF_GRSEC_SETXID 12
2137+
2138 #define TIF_USING_IWMMXT 17
2139 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2140 #define TIF_RESTORE_SIGMASK 20
2141@@ -165,10 +171,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2142 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2143 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2144 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2145+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2146
2147 /* Checks for any syscall work in entry-common.S */
2148 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2149- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2150+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2151
2152 /*
2153 * Change these and you break ASM code in entry-common.S
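
The comment pinning TIF_GRSEC_SETXID "within 8 bits of TIF_SYSCALL_TRACE" is an ARM encoding constraint: a flexible second operand is an 8-bit value rotated right by an even amount, so the combined _TIF_SYSCALL_WORK mask can only be tested in a single instruction if all of its bits fit such a window. A quick user-space check of the mask built in this hunk, assuming TIF_SYSCALL_TRACE is bit 8 as in this kernel, and ignoring the even-rotation detail for brevity:

```c
#include <stdio.h>

int main(void)
{
    /* TIF_SYSCALL_TRACE(8), AUDIT(9), TRACEPOINT(10), SECCOMP(11),
     * GRSEC_SETXID(12) -> combined mask 0x1f00 */
    unsigned long mask = 0x1f00;

    while (mask && !(mask & 1))   /* rotate the window down to bit 0 */
        mask >>= 1;
    printf("encodable as one ARM immediate: %s\n",
           mask < 0x100 ? "yes" : "no");
    return 0;
}
```
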
2154diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2155index 7e1f760..752fcb7 100644
2156--- a/arch/arm/include/asm/uaccess.h
2157+++ b/arch/arm/include/asm/uaccess.h
2158@@ -18,6 +18,7 @@
2159 #include <asm/domain.h>
2160 #include <asm/unified.h>
2161 #include <asm/compiler.h>
2162+#include <asm/pgtable.h>
2163
2164 #define VERIFY_READ 0
2165 #define VERIFY_WRITE 1
2166@@ -60,10 +61,34 @@ extern int __put_user_bad(void);
2167 #define USER_DS TASK_SIZE
2168 #define get_fs() (current_thread_info()->addr_limit)
2169
2170+static inline void pax_open_userland(void)
2171+{
2172+
2173+#ifdef CONFIG_PAX_MEMORY_UDEREF
2174+ if (get_fs() == USER_DS) {
2175+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2176+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2177+ }
2178+#endif
2179+
2180+}
2181+
2182+static inline void pax_close_userland(void)
2183+{
2184+
2185+#ifdef CONFIG_PAX_MEMORY_UDEREF
2186+ if (get_fs() == USER_DS) {
2187+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2188+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2189+ }
2190+#endif
2191+
2192+}
2193+
2194 static inline void set_fs(mm_segment_t fs)
2195 {
2196 current_thread_info()->addr_limit = fs;
2197- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2198+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2199 }
2200
2201 #define segment_eq(a,b) ((a) == (b))
2202@@ -143,8 +168,12 @@ extern int __get_user_4(void *);
2203
2204 #define get_user(x,p) \
2205 ({ \
2206+ int __e; \
2207 might_fault(); \
2208- __get_user_check(x,p); \
2209+ pax_open_userland(); \
2210+ __e = __get_user_check(x,p); \
2211+ pax_close_userland(); \
2212+ __e; \
2213 })
2214
2215 extern int __put_user_1(void *, unsigned int);
2216@@ -188,8 +217,12 @@ extern int __put_user_8(void *, unsigned long long);
2217
2218 #define put_user(x,p) \
2219 ({ \
2220+ int __e; \
2221 might_fault(); \
2222- __put_user_check(x,p); \
2223+ pax_open_userland(); \
2224+ __e = __put_user_check(x,p); \
2225+ pax_close_userland(); \
2226+ __e; \
2227 })
2228
2229 #else /* CONFIG_MMU */
2230@@ -230,13 +263,17 @@ static inline void set_fs(mm_segment_t fs)
2231 #define __get_user(x,ptr) \
2232 ({ \
2233 long __gu_err = 0; \
2234+ pax_open_userland(); \
2235 __get_user_err((x),(ptr),__gu_err); \
2236+ pax_close_userland(); \
2237 __gu_err; \
2238 })
2239
2240 #define __get_user_error(x,ptr,err) \
2241 ({ \
2242+ pax_open_userland(); \
2243 __get_user_err((x),(ptr),err); \
2244+ pax_close_userland(); \
2245 (void) 0; \
2246 })
2247
2248@@ -312,13 +349,17 @@ do { \
2249 #define __put_user(x,ptr) \
2250 ({ \
2251 long __pu_err = 0; \
2252+ pax_open_userland(); \
2253 __put_user_err((x),(ptr),__pu_err); \
2254+ pax_close_userland(); \
2255 __pu_err; \
2256 })
2257
2258 #define __put_user_error(x,ptr,err) \
2259 ({ \
2260+ pax_open_userland(); \
2261 __put_user_err((x),(ptr),err); \
2262+ pax_close_userland(); \
2263 (void) 0; \
2264 })
2265
2266@@ -418,11 +459,44 @@ do { \
2267
2268
2269 #ifdef CONFIG_MMU
2270-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2271-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2272+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2273+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2274+
2275+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2276+{
2277+ unsigned long ret;
2278+
2279+ check_object_size(to, n, false);
2280+ pax_open_userland();
2281+ ret = ___copy_from_user(to, from, n);
2282+ pax_close_userland();
2283+ return ret;
2284+}
2285+
2286+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2287+{
2288+ unsigned long ret;
2289+
2290+ check_object_size(from, n, true);
2291+ pax_open_userland();
2292+ ret = ___copy_to_user(to, from, n);
2293+ pax_close_userland();
2294+ return ret;
2295+}
2296+
2297 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2298-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2299+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2300 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2301+
2302+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2303+{
2304+ unsigned long ret;
2305+ pax_open_userland();
2306+ ret = ___clear_user(addr, n);
2307+ pax_close_userland();
2308+ return ret;
2309+}
2310+
2311 #else
2312 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2313 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2314@@ -431,6 +505,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2315
2316 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2317 {
2318+ if ((long)n < 0)
2319+ return n;
2320+
2321 if (access_ok(VERIFY_READ, from, n))
2322 n = __copy_from_user(to, from, n);
2323 else /* security hole - plug it */
2324@@ -440,6 +517,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2325
2326 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2327 {
2328+ if ((long)n < 0)
2329+ return n;
2330+
2331 if (access_ok(VERIFY_WRITE, to, n))
2332 n = __copy_to_user(to, from, n);
2333 return n;
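
Two distinct hardenings land in this uaccess.h hunk: every user access is bracketed by pax_open_userland()/pax_close_userland() so DOMAIN_USER is only reachable for the duration of the access, and copy_from_user()/copy_to_user() gain a `(long)n < 0` guard that cheaply rejects length values with the top bit set, the typical signature of a signed-underflowed size_t. A user-space illustration of the sign guard:

```c
#include <stdio.h>

/* Model of the added guard: a huge n (top bit set) is returned as-is,
 * i.e. "nothing copied", before any range check or copy runs. */
static unsigned long checked_copy(unsigned long n)
{
    if ((long)n < 0)
        return n;
    /* ... access_ok() and the real copy would follow here ... */
    return 0;
}

int main(void)
{
    unsigned long good = 16;
    unsigned long bad = (unsigned long)(8 - 32);  /* underflowed length */
    printf("good request: %lu bytes left\n", checked_copy(good));
    printf("bad request:  %lu bytes left\n", checked_copy(bad));
    return 0;
}
```
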
2334diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2335index 96ee092..37f1844 100644
2336--- a/arch/arm/include/uapi/asm/ptrace.h
2337+++ b/arch/arm/include/uapi/asm/ptrace.h
2338@@ -73,7 +73,7 @@
2339 * ARMv7 groups of PSR bits
2340 */
2341 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2342-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2343+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2344 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2345 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2346
2347diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2348index 60d3b73..d27ee09 100644
2349--- a/arch/arm/kernel/armksyms.c
2350+++ b/arch/arm/kernel/armksyms.c
2351@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2352 #ifdef CONFIG_MMU
2353 EXPORT_SYMBOL(copy_page);
2354
2355-EXPORT_SYMBOL(__copy_from_user);
2356-EXPORT_SYMBOL(__copy_to_user);
2357-EXPORT_SYMBOL(__clear_user);
2358+EXPORT_SYMBOL(___copy_from_user);
2359+EXPORT_SYMBOL(___copy_to_user);
2360+EXPORT_SYMBOL(___clear_user);
2361
2362 EXPORT_SYMBOL(__get_user_1);
2363 EXPORT_SYMBOL(__get_user_2);
2364diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2365index 0f82098..3dbd3ee 100644
2366--- a/arch/arm/kernel/entry-armv.S
2367+++ b/arch/arm/kernel/entry-armv.S
2368@@ -47,6 +47,87 @@
2369 9997:
2370 .endm
2371
2372+ .macro pax_enter_kernel
2373+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2374+ @ make aligned space for saved DACR
2375+ sub sp, sp, #8
2376+ @ save regs
2377+ stmdb sp!, {r1, r2}
2378+ @ read DACR from cpu_domain into r1
2379+ mov r2, sp
2380+ @ assume 8K pages, since we have to split the immediate in two
2381+ bic r2, r2, #(0x1fc0)
2382+ bic r2, r2, #(0x3f)
2383+ ldr r1, [r2, #TI_CPU_DOMAIN]
2384+ @ store old DACR on stack
2385+ str r1, [sp, #8]
2386+#ifdef CONFIG_PAX_KERNEXEC
2387+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2388+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2389+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2390+#endif
2391+#ifdef CONFIG_PAX_MEMORY_UDEREF
2392+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2393+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2394+#endif
2395+ @ write r1 to current_thread_info()->cpu_domain
2396+ str r1, [r2, #TI_CPU_DOMAIN]
2397+ @ write r1 to DACR
2398+ mcr p15, 0, r1, c3, c0, 0
2399+ @ instruction sync
2400+ instr_sync
2401+ @ restore regs
2402+ ldmia sp!, {r1, r2}
2403+#endif
2404+ .endm
2405+
2406+ .macro pax_open_userland
2407+#ifdef CONFIG_PAX_MEMORY_UDEREF
2408+ @ save regs
2409+ stmdb sp!, {r0, r1}
2410+ @ read DACR from cpu_domain into r1
2411+ mov r0, sp
2412+ @ assume 8K pages, since we have to split the immediate in two
2413+ bic r0, r0, #(0x1fc0)
2414+ bic r0, r0, #(0x3f)
2415+ ldr r1, [r0, #TI_CPU_DOMAIN]
2416+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2417+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2418+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2419+ @ write r1 to current_thread_info()->cpu_domain
2420+ str r1, [r0, #TI_CPU_DOMAIN]
2421+ @ write r1 to DACR
2422+ mcr p15, 0, r1, c3, c0, 0
2423+ @ instruction sync
2424+ instr_sync
2425+ @ restore regs
2426+ ldmia sp!, {r0, r1}
2427+#endif
2428+ .endm
2429+
2430+ .macro pax_close_userland
2431+#ifdef CONFIG_PAX_MEMORY_UDEREF
2432+ @ save regs
2433+ stmdb sp!, {r0, r1}
2434+ @ read DACR from cpu_domain into r1
2435+ mov r0, sp
2436+ @ assume 8K pages, since we have to split the immediate in two
2437+ bic r0, r0, #(0x1fc0)
2438+ bic r0, r0, #(0x3f)
2439+ ldr r1, [r0, #TI_CPU_DOMAIN]
2440+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2441+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2442+ @ write r1 to current_thread_info()->cpu_domain
2443+ str r1, [r0, #TI_CPU_DOMAIN]
2444+ @ write r1 to DACR
2445+ mcr p15, 0, r1, c3, c0, 0
2446+ @ instruction sync
2447+ instr_sync
2448+ @ restore regs
2449+ ldmia sp!, {r0, r1}
2450+#endif
2451+ .endm
2452+
2453 .macro pabt_helper
2454 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2455 #ifdef MULTI_PABORT
2456@@ -89,11 +170,15 @@
2457 * Invalid mode handlers
2458 */
2459 .macro inv_entry, reason
2460+
2461+ pax_enter_kernel
2462+
2463 sub sp, sp, #S_FRAME_SIZE
2464 ARM( stmib sp, {r1 - lr} )
2465 THUMB( stmia sp, {r0 - r12} )
2466 THUMB( str sp, [sp, #S_SP] )
2467 THUMB( str lr, [sp, #S_LR] )
2468+
2469 mov r1, #\reason
2470 .endm
2471
2472@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2473 .macro svc_entry, stack_hole=0
2474 UNWIND(.fnstart )
2475 UNWIND(.save {r0 - pc} )
2476+
2477+ pax_enter_kernel
2478+
2479 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2480+
2481 #ifdef CONFIG_THUMB2_KERNEL
2482 SPFIX( str r0, [sp] ) @ temporarily saved
2483 SPFIX( mov r0, sp )
2484@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2485 ldmia r0, {r3 - r5}
2486 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2487 mov r6, #-1 @ "" "" "" ""
2488+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2489+ @ offset sp by 8 as done in pax_enter_kernel
2490+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2491+#else
2492 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2493+#endif
2494 SPFIX( addeq r2, r2, #4 )
2495 str r3, [sp, #-4]! @ save the "real" r0 copied
2496 @ from the exception stack
2497@@ -359,6 +453,9 @@ ENDPROC(__pabt_svc)
2498 .macro usr_entry
2499 UNWIND(.fnstart )
2500 UNWIND(.cantunwind ) @ don't unwind the user space
2501+
2502+ pax_enter_kernel_user
2503+
2504 sub sp, sp, #S_FRAME_SIZE
2505 ARM( stmib sp, {r1 - r12} )
2506 THUMB( stmia sp, {r0 - r12} )
2507@@ -456,7 +553,9 @@ __und_usr:
2508 tst r3, #PSR_T_BIT @ Thumb mode?
2509 bne __und_usr_thumb
2510 sub r4, r2, #4 @ ARM instr at LR - 4
2511+ pax_open_userland
2512 1: ldrt r0, [r4]
2513+ pax_close_userland
2514 #ifdef CONFIG_CPU_ENDIAN_BE8
2515 rev r0, r0 @ little endian instruction
2516 #endif
2517@@ -491,10 +590,14 @@ __und_usr_thumb:
2518 */
2519 .arch armv6t2
2520 #endif
2521+ pax_open_userland
2522 2: ldrht r5, [r4]
2523+ pax_close_userland
2524 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2525 blo __und_usr_fault_16 @ 16bit undefined instruction
2526+ pax_open_userland
2527 3: ldrht r0, [r2]
2528+ pax_close_userland
2529 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2530 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2531 orr r0, r0, r5, lsl #16
2532@@ -733,7 +836,7 @@ ENTRY(__switch_to)
2533 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
2534 THUMB( str sp, [ip], #4 )
2535 THUMB( str lr, [ip], #4 )
2536-#ifdef CONFIG_CPU_USE_DOMAINS
2537+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2538 ldr r6, [r2, #TI_CPU_DOMAIN]
2539 #endif
2540 set_tls r3, r4, r5
2541@@ -742,7 +845,7 @@ ENTRY(__switch_to)
2542 ldr r8, =__stack_chk_guard
2543 ldr r7, [r7, #TSK_STACK_CANARY]
2544 #endif
2545-#ifdef CONFIG_CPU_USE_DOMAINS
2546+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC)
2547 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2548 #endif
2549 mov r5, r0
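
All of the macros above locate thread_info the same way: mask the low 13 bits off sp to reach the base of the 8 KiB kernel stack. Because an ARM immediate is an 8-bit value with an even rotation, the 13-bit mask 0x1fff cannot be encoded in one instruction, hence the paired `bic` with 0x1fc0 and 0x3f that the comments call splitting the immediate in two. Equivalent arithmetic in C, with a made-up stack pointer:

```c
#include <stdio.h>

#define THREAD_SIZE 8192UL   /* the "8K pages" the macro comments assume */

int main(void)
{
    unsigned long sp = 0xc158bf40UL;                  /* fake kernel sp */
    unsigned long one_mask = sp & ~(THREAD_SIZE - 1); /* single mask in C */
    unsigned long two_bics = (sp & ~0x1fc0UL) & ~0x3fUL; /* two BICs on ARM */

    printf("%#lx %#lx\n", one_mask, two_bics);        /* identical results */
    return one_mask != two_bics;
}
```
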
2550diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2551index a6c301e..908821b 100644
2552--- a/arch/arm/kernel/entry-common.S
2553+++ b/arch/arm/kernel/entry-common.S
2554@@ -10,18 +10,46 @@
2555
2556 #include <asm/unistd.h>
2557 #include <asm/ftrace.h>
2558+#include <asm/domain.h>
2559 #include <asm/unwind.h>
2560
2561+#include "entry-header.S"
2562+
2563 #ifdef CONFIG_NEED_RET_TO_USER
2564 #include <mach/entry-macro.S>
2565 #else
2566 .macro arch_ret_to_user, tmp1, tmp2
2567+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2568+ @ save regs
2569+ stmdb sp!, {r1, r2}
2570+ @ read DACR from cpu_domain into r1
2571+ mov r2, sp
2572+ @ assume 8K pages, since we have to split the immediate in two
2573+ bic r2, r2, #(0x1fc0)
2574+ bic r2, r2, #(0x3f)
2575+ ldr r1, [r2, #TI_CPU_DOMAIN]
2576+#ifdef CONFIG_PAX_KERNEXEC
2577+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2578+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2579+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2580+#endif
2581+#ifdef CONFIG_PAX_MEMORY_UDEREF
2582+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2583+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2584+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2585+#endif
2586+ @ write r1 to current_thread_info()->cpu_domain
2587+ str r1, [r2, #TI_CPU_DOMAIN]
2588+ @ write r1 to DACR
2589+ mcr p15, 0, r1, c3, c0, 0
2590+ @ instruction sync
2591+ instr_sync
2592+ @ restore regs
2593+ ldmia sp!, {r1, r2}
2594+#endif
2595 .endm
2596 #endif
2597
2598-#include "entry-header.S"
2599-
2600-
2601 .align 5
2602 /*
2603 * This is the fast syscall return path. We do as little as
2604@@ -339,6 +367,7 @@ ENDPROC(ftrace_stub)
2605
2606 .align 5
2607 ENTRY(vector_swi)
2608+
2609 sub sp, sp, #S_FRAME_SIZE
2610 stmia sp, {r0 - r12} @ Calling r0 - r12
2611 ARM( add r8, sp, #S_PC )
2612@@ -388,6 +417,12 @@ ENTRY(vector_swi)
2613 ldr scno, [lr, #-4] @ get SWI instruction
2614 #endif
2615
2616+ /*
2617+ * do this here to avoid a performance hit of wrapping the code above
2618+ * that directly dereferences userland to parse the SWI instruction
2619+ */
2620+ pax_enter_kernel_user
2621+
2622 #ifdef CONFIG_ALIGNMENT_TRAP
2623 ldr ip, __cr_alignment
2624 ldr ip, [ip]
2625diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2626index 9a8531e..812e287 100644
2627--- a/arch/arm/kernel/entry-header.S
2628+++ b/arch/arm/kernel/entry-header.S
2629@@ -73,9 +73,66 @@
2630 msr cpsr_c, \rtemp @ switch back to the SVC mode
2631 .endm
2632
2633+ .macro pax_enter_kernel_user
2634+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2635+ @ save regs
2636+ stmdb sp!, {r0, r1}
2637+ @ read DACR from cpu_domain into r1
2638+ mov r0, sp
2639+ @ assume 8K pages, since we have to split the immediate in two
2640+ bic r0, r0, #(0x1fc0)
2641+ bic r0, r0, #(0x3f)
2642+ ldr r1, [r0, #TI_CPU_DOMAIN]
2643+#ifdef CONFIG_PAX_MEMORY_UDEREF
2644+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2645+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2646+#endif
2647+#ifdef CONFIG_PAX_KERNEXEC
2648+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2649+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2650+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2651+#endif
2652+ @ write r1 to current_thread_info()->cpu_domain
2653+ str r1, [r0, #TI_CPU_DOMAIN]
2654+ @ write r1 to DACR
2655+ mcr p15, 0, r1, c3, c0, 0
2656+ @ instruction sync
2657+ instr_sync
2658+ @ restore regs
2659+ ldmia sp!, {r0, r1}
2660+#endif
2661+ .endm
2662+
2663+ .macro pax_exit_kernel
2664+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2665+ @ save regs
2666+ stmdb sp!, {r0, r1}
2667+ @ read old DACR from stack into r1
2668+ ldr r1, [sp, #(8 + S_SP)]
2669+ sub r1, r1, #8
2670+ ldr r1, [r1]
2671+
2672+ @ write r1 to current_thread_info()->cpu_domain
2673+ mov r0, sp
2674+ @ assume 8K pages, since we have to split the immediate in two
2675+ bic r0, r0, #(0x1fc0)
2676+ bic r0, r0, #(0x3f)
2677+ str r1, [r0, #TI_CPU_DOMAIN]
2678+ @ write r1 to DACR
2679+ mcr p15, 0, r1, c3, c0, 0
2680+ @ instruction sync
2681+ instr_sync
2682+ @ restore regs
2683+ ldmia sp!, {r0, r1}
2684+#endif
2685+ .endm
2686+
2687 #ifndef CONFIG_THUMB2_KERNEL
2688 .macro svc_exit, rpsr
2689 msr spsr_cxsf, \rpsr
2690+
2691+ pax_exit_kernel
2692+
2693 #if defined(CONFIG_CPU_V6)
2694 ldr r0, [sp]
2695 strex r1, r2, [sp] @ clear the exclusive monitor
2696@@ -121,6 +178,9 @@
2697 .endm
2698 #else /* CONFIG_THUMB2_KERNEL */
2699 .macro svc_exit, rpsr
2700+
2701+ pax_exit_kernel
2702+
2703 ldr lr, [sp, #S_SP] @ top of the stack
2704 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2705 clrex @ clear the exclusive monitor
2706diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2707index 2adda11..7fbe958 100644
2708--- a/arch/arm/kernel/fiq.c
2709+++ b/arch/arm/kernel/fiq.c
2710@@ -82,7 +82,9 @@ void set_fiq_handler(void *start, unsigned int length)
2711 #if defined(CONFIG_CPU_USE_DOMAINS)
2712 memcpy((void *)0xffff001c, start, length);
2713 #else
2714+ pax_open_kernel();
2715 memcpy(vectors_page + 0x1c, start, length);
2716+ pax_close_kernel();
2717 #endif
2718 flush_icache_range(0xffff001c, 0xffff001c + length);
2719 if (!vectors_high())
2720diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2721index 486a15a..2d6880e 100644
2722--- a/arch/arm/kernel/head.S
2723+++ b/arch/arm/kernel/head.S
2724@@ -52,7 +52,9 @@
2725 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2726
2727 .macro pgtbl, rd, phys
2728- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2729+ mov \rd, #TEXT_OFFSET
2730+ sub \rd, #PG_DIR_SIZE
2731+ add \rd, \rd, \phys
2732 .endm
2733
2734 /*
2735@@ -416,7 +418,7 @@ __enable_mmu:
2736 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2737 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2738 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2739- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2740+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2741 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2742 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2743 #endif
2744diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
2745index 5ff2e77..556d030 100644
2746--- a/arch/arm/kernel/hw_breakpoint.c
2747+++ b/arch/arm/kernel/hw_breakpoint.c
2748@@ -1011,7 +1011,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
2749 return NOTIFY_OK;
2750 }
2751
2752-static struct notifier_block __cpuinitdata dbg_reset_nb = {
2753+static struct notifier_block dbg_reset_nb = {
2754 .notifier_call = dbg_reset_notify,
2755 };
2756
2757diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2758index 1e9be5d..03edbc2 100644
2759--- a/arch/arm/kernel/module.c
2760+++ b/arch/arm/kernel/module.c
2761@@ -37,12 +37,37 @@
2762 #endif
2763
2764 #ifdef CONFIG_MMU
2765-void *module_alloc(unsigned long size)
2766+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2767 {
2768+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2769+ return NULL;
2770 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2771- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2772+ GFP_KERNEL, prot, -1,
2773 __builtin_return_address(0));
2774 }
2775+
2776+void *module_alloc(unsigned long size)
2777+{
2778+
2779+#ifdef CONFIG_PAX_KERNEXEC
2780+ return __module_alloc(size, PAGE_KERNEL);
2781+#else
2782+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2783+#endif
2784+
2785+}
2786+
2787+#ifdef CONFIG_PAX_KERNEXEC
2788+void module_free_exec(struct module *mod, void *module_region)
2789+{
2790+ module_free(mod, module_region);
2791+}
2792+
2793+void *module_alloc_exec(unsigned long size)
2794+{
2795+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2796+}
2797+#endif
2798 #endif
2799
2800 int
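
Under KERNEXEC the hunk above allocates ordinary module memory PAGE_KERNEL (writable, non-executable) and routes executable sections through the separate module_alloc_exec() pool, so no module mapping is ever writable and executable at once. The new size guard also rejects empty and oversized requests before vmalloc sees them; a user-space model with a made-up module window:

```c
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Model of the check added to __module_alloc(): zero-size requests and
 * requests larger than the module VA window fail fast. */
static int alloc_ok(unsigned long size, unsigned long window)
{
    return size != 0 && PAGE_ALIGN(size) <= window;
}

int main(void)
{
    unsigned long window = 16UL << 20;            /* pretend 16 MiB window */
    printf("%d\n", alloc_ok(0, window));          /* 0: empty request */
    printf("%d\n", alloc_ok(5000, window));       /* 1: fits */
    printf("%d\n", alloc_ok(32UL << 20, window)); /* 0: larger than window */
    return 0;
}
```
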
2801diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2802index 07314af..c46655c 100644
2803--- a/arch/arm/kernel/patch.c
2804+++ b/arch/arm/kernel/patch.c
2805@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2806 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2807 int size;
2808
2809+ pax_open_kernel();
2810 if (thumb2 && __opcode_is_thumb16(insn)) {
2811 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2812 size = sizeof(u16);
2813@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2814 *(u32 *)addr = insn;
2815 size = sizeof(u32);
2816 }
2817+ pax_close_kernel();
2818
2819 flush_icache_range((uintptr_t)(addr),
2820 (uintptr_t)(addr) + size);
2821diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
2822index 5f66206..dce492f 100644
2823--- a/arch/arm/kernel/perf_event_cpu.c
2824+++ b/arch/arm/kernel/perf_event_cpu.c
2825@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
2826 return NOTIFY_OK;
2827 }
2828
2829-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
2830+static struct notifier_block cpu_pmu_hotplug_notifier = {
2831 .notifier_call = cpu_pmu_notify,
2832 };
2833
2834diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2835index c6dec5f..f853532 100644
2836--- a/arch/arm/kernel/process.c
2837+++ b/arch/arm/kernel/process.c
2838@@ -28,7 +28,6 @@
2839 #include <linux/tick.h>
2840 #include <linux/utsname.h>
2841 #include <linux/uaccess.h>
2842-#include <linux/random.h>
2843 #include <linux/hw_breakpoint.h>
2844 #include <linux/cpuidle.h>
2845 #include <linux/leds.h>
2846@@ -256,9 +255,10 @@ void machine_power_off(void)
2847 machine_shutdown();
2848 if (pm_power_off)
2849 pm_power_off();
2850+ BUG();
2851 }
2852
2853-void machine_restart(char *cmd)
2854+__noreturn void machine_restart(char *cmd)
2855 {
2856 machine_shutdown();
2857
2858@@ -283,8 +283,8 @@ void __show_regs(struct pt_regs *regs)
2859 init_utsname()->release,
2860 (int)strcspn(init_utsname()->version, " "),
2861 init_utsname()->version);
2862- print_symbol("PC is at %s\n", instruction_pointer(regs));
2863- print_symbol("LR is at %s\n", regs->ARM_lr);
2864+ printk("PC is at %pA\n", instruction_pointer(regs));
2865+ printk("LR is at %pA\n", regs->ARM_lr);
2866 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2867 "sp : %08lx ip : %08lx fp : %08lx\n",
2868 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2869@@ -452,12 +452,6 @@ unsigned long get_wchan(struct task_struct *p)
2870 return 0;
2871 }
2872
2873-unsigned long arch_randomize_brk(struct mm_struct *mm)
2874-{
2875- unsigned long range_end = mm->brk + 0x02000000;
2876- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2877-}
2878-
2879 #ifdef CONFIG_MMU
2880 /*
2881 * The vectors page is always readable from user space for the
2882diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2883index 03deeff..741ce88 100644
2884--- a/arch/arm/kernel/ptrace.c
2885+++ b/arch/arm/kernel/ptrace.c
2886@@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2887 return current_thread_info()->syscall;
2888 }
2889
2890+#ifdef CONFIG_GRKERNSEC_SETXID
2891+extern void gr_delayed_cred_worker(void);
2892+#endif
2893+
2894 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2895 {
2896 current_thread_info()->syscall = scno;
2897
2898+#ifdef CONFIG_GRKERNSEC_SETXID
2899+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2900+ gr_delayed_cred_worker();
2901+#endif
2902+
2903 /* Do the secure computing check first; failures should be fast. */
2904 if (secure_computing(scno) == -1)
2905 return -1;
2906diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2907index 3f6cbb2..6d856f5 100644
2908--- a/arch/arm/kernel/setup.c
2909+++ b/arch/arm/kernel/setup.c
2910@@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
2911 unsigned int elf_hwcap __read_mostly;
2912 EXPORT_SYMBOL(elf_hwcap);
2913
2914+pteval_t __supported_pte_mask __read_only;
2915+pmdval_t __supported_pmd_mask __read_only;
2916
2917 #ifdef MULTI_CPU
2918-struct processor processor __read_mostly;
2919+struct processor processor;
2920 #endif
2921 #ifdef MULTI_TLB
2922-struct cpu_tlb_fns cpu_tlb __read_mostly;
2923+struct cpu_tlb_fns cpu_tlb __read_only;
2924 #endif
2925 #ifdef MULTI_USER
2926-struct cpu_user_fns cpu_user __read_mostly;
2927+struct cpu_user_fns cpu_user __read_only;
2928 #endif
2929 #ifdef MULTI_CACHE
2930-struct cpu_cache_fns cpu_cache __read_mostly;
2931+struct cpu_cache_fns cpu_cache __read_only;
2932 #endif
2933 #ifdef CONFIG_OUTER_CACHE
2934-struct outer_cache_fns outer_cache __read_mostly;
2935+struct outer_cache_fns outer_cache __read_only;
2936 EXPORT_SYMBOL(outer_cache);
2937 #endif
2938
2939@@ -236,9 +238,13 @@ static int __get_cpu_architecture(void)
2940 asm("mrc p15, 0, %0, c0, c1, 4"
2941 : "=r" (mmfr0));
2942 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
2943- (mmfr0 & 0x000000f0) >= 0x00000030)
2944+ (mmfr0 & 0x000000f0) >= 0x00000030) {
2945 cpu_arch = CPU_ARCH_ARMv7;
2946- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
2947+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
2948+ __supported_pte_mask |= L_PTE_PXN;
2949+ __supported_pmd_mask |= PMD_PXNTABLE;
2950+ }
2951+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
2952 (mmfr0 & 0x000000f0) == 0x00000020)
2953 cpu_arch = CPU_ARCH_ARMv6;
2954 else
2955@@ -462,7 +468,7 @@ static void __init setup_processor(void)
2956 __cpu_architecture = __get_cpu_architecture();
2957
2958 #ifdef MULTI_CPU
2959- processor = *list->proc;
2960+ memcpy((void *)&processor, list->proc, sizeof processor);
2961 #endif
2962 #ifdef MULTI_TLB
2963 cpu_tlb = *list->tlb;
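
The setup.c hunk turns the PXN decision into a runtime probe: ID_MMFR0 bits [3:0] report the VMSA support level, and levels 4 and 5 are taken to imply the PXN page-table bits, which are then folded into __supported_pte_mask/__supported_pmd_mask. A small model of the decode, with an invented register value:

```c
#include <stdio.h>

int main(void)
{
    unsigned int mmfr0 = 0x10201105;   /* fake ID_MMFR0 value */
    unsigned int vmsa  = mmfr0 & 0x0000000f;

    int is_v7    = vmsa >= 3 || (mmfr0 & 0x000000f0) >= 0x30;
    int have_pxn = is_v7 && (vmsa == 4 || vmsa == 5);

    printf("ARMv7 MMU: %d, PXN usable: %d\n", is_v7, have_pxn);
    return 0;
}
```
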
2964diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
2965index 84f4cbf..672f5b8 100644
2966--- a/arch/arm/kernel/smp.c
2967+++ b/arch/arm/kernel/smp.c
2968@@ -70,7 +70,7 @@ enum ipi_msg_type {
2969
2970 static DECLARE_COMPLETION(cpu_running);
2971
2972-static struct smp_operations smp_ops;
2973+static struct smp_operations smp_ops __read_only;
2974
2975 void __init smp_set_ops(struct smp_operations *ops)
2976 {
2977diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
2978index b0179b8..b7b16c7 100644
2979--- a/arch/arm/kernel/traps.c
2980+++ b/arch/arm/kernel/traps.c
2981@@ -57,7 +57,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
2982 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
2983 {
2984 #ifdef CONFIG_KALLSYMS
2985- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
2986+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
2987 #else
2988 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
2989 #endif
2990@@ -266,6 +266,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
2991 static int die_owner = -1;
2992 static unsigned int die_nest_count;
2993
2994+extern void gr_handle_kernel_exploit(void);
2995+
2996 static unsigned long oops_begin(void)
2997 {
2998 int cpu;
2999@@ -308,6 +310,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3000 panic("Fatal exception in interrupt");
3001 if (panic_on_oops)
3002 panic("Fatal exception");
3003+
3004+ gr_handle_kernel_exploit();
3005+
3006 if (signr)
3007 do_exit(signr);
3008 }
3009@@ -601,7 +606,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3010 * The user helper at 0xffff0fe0 must be used instead.
3011 * (see entry-armv.S for details)
3012 */
3013+ pax_open_kernel();
3014 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3015+ pax_close_kernel();
3016 }
3017 return 0;
3018
3019@@ -849,5 +856,9 @@ void __init early_trap_init(void *vectors_base)
3020 sigreturn_codes, sizeof(sigreturn_codes));
3021
3022 flush_icache_range(vectors, vectors + PAGE_SIZE);
3023- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3024+
3025+#ifndef CONFIG_PAX_MEMORY_UDEREF
3026+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3027+#endif
3028+
3029 }
3030diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3031index 11c1785..c67d54c 100644
3032--- a/arch/arm/kernel/vmlinux.lds.S
3033+++ b/arch/arm/kernel/vmlinux.lds.S
3034@@ -8,7 +8,11 @@
3035 #include <asm/thread_info.h>
3036 #include <asm/memory.h>
3037 #include <asm/page.h>
3038-
3039+
3040+#ifdef CONFIG_PAX_KERNEXEC
3041+#include <asm/pgtable.h>
3042+#endif
3043+
3044 #define PROC_INFO \
3045 . = ALIGN(4); \
3046 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3047@@ -90,6 +94,11 @@ SECTIONS
3048 _text = .;
3049 HEAD_TEXT
3050 }
3051+
3052+#ifdef CONFIG_PAX_KERNEXEC
3053+ . = ALIGN(1<<SECTION_SHIFT);
3054+#endif
3055+
3056 .text : { /* Real text segment */
3057 _stext = .; /* Text and read-only data */
3058 __exception_text_start = .;
3059@@ -144,6 +153,10 @@ SECTIONS
3060
3061 _etext = .; /* End of text and rodata section */
3062
3063+#ifdef CONFIG_PAX_KERNEXEC
3064+ . = ALIGN(1<<SECTION_SHIFT);
3065+#endif
3066+
3067 #ifndef CONFIG_XIP_KERNEL
3068 . = ALIGN(PAGE_SIZE);
3069 __init_begin = .;
3070@@ -203,6 +216,11 @@ SECTIONS
3071 . = PAGE_OFFSET + TEXT_OFFSET;
3072 #else
3073 __init_end = .;
3074+
3075+#ifdef CONFIG_PAX_KERNEXEC
3076+ . = ALIGN(1<<SECTION_SHIFT);
3077+#endif
3078+
3079 . = ALIGN(THREAD_SIZE);
3080 __data_loc = .;
3081 #endif
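
Each KERNEXEC `ALIGN(1<<SECTION_SHIFT)` above pads a segment boundary to the next 1 MiB section, so the RX text and RW data regions created later by the mm/ changes never share a section descriptor whose permissions would otherwise have to compromise. The padding is plain round-up arithmetic:

```c
#include <stdio.h>

#define SECTION_SHIFT 20   /* 1 MiB sections */

int main(void)
{
    unsigned long etext   = 0xc05f2340UL;   /* fake end of text */
    unsigned long section = 1UL << SECTION_SHIFT;
    unsigned long padded  = (etext + section - 1) & ~(section - 1);

    printf("%#lx rounds up to %#lx\n", etext, padded);
    return 0;
}
```
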
3082diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3083index 14a0d98..7771a7d 100644
3084--- a/arch/arm/lib/clear_user.S
3085+++ b/arch/arm/lib/clear_user.S
3086@@ -12,14 +12,14 @@
3087
3088 .text
3089
3090-/* Prototype: int __clear_user(void *addr, size_t sz)
3091+/* Prototype: int ___clear_user(void *addr, size_t sz)
3092 * Purpose : clear some user memory
3093 * Params : addr - user memory address to clear
3094 * : sz - number of bytes to clear
3095 * Returns : number of bytes NOT cleared
3096 */
3097 ENTRY(__clear_user_std)
3098-WEAK(__clear_user)
3099+WEAK(___clear_user)
3100 stmfd sp!, {r1, lr}
3101 mov r2, #0
3102 cmp r1, #4
3103@@ -44,7 +44,7 @@ WEAK(__clear_user)
3104 USER( strnebt r2, [r0])
3105 mov r0, #0
3106 ldmfd sp!, {r1, pc}
3107-ENDPROC(__clear_user)
3108+ENDPROC(___clear_user)
3109 ENDPROC(__clear_user_std)
3110
3111 .pushsection .fixup,"ax"
3112diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3113index 66a477a..bee61d3 100644
3114--- a/arch/arm/lib/copy_from_user.S
3115+++ b/arch/arm/lib/copy_from_user.S
3116@@ -16,7 +16,7 @@
3117 /*
3118 * Prototype:
3119 *
3120- * size_t __copy_from_user(void *to, const void *from, size_t n)
3121+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3122 *
3123 * Purpose:
3124 *
3125@@ -84,11 +84,11 @@
3126
3127 .text
3128
3129-ENTRY(__copy_from_user)
3130+ENTRY(___copy_from_user)
3131
3132 #include "copy_template.S"
3133
3134-ENDPROC(__copy_from_user)
3135+ENDPROC(___copy_from_user)
3136
3137 .pushsection .fixup,"ax"
3138 .align 0
3139diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3140index 6ee2f67..d1cce76 100644
3141--- a/arch/arm/lib/copy_page.S
3142+++ b/arch/arm/lib/copy_page.S
3143@@ -10,6 +10,7 @@
3144 * ASM optimised string functions
3145 */
3146 #include <linux/linkage.h>
3147+#include <linux/const.h>
3148 #include <asm/assembler.h>
3149 #include <asm/asm-offsets.h>
3150 #include <asm/cache.h>
3151diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3152index d066df6..df28194 100644
3153--- a/arch/arm/lib/copy_to_user.S
3154+++ b/arch/arm/lib/copy_to_user.S
3155@@ -16,7 +16,7 @@
3156 /*
3157 * Prototype:
3158 *
3159- * size_t __copy_to_user(void *to, const void *from, size_t n)
3160+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3161 *
3162 * Purpose:
3163 *
3164@@ -88,11 +88,11 @@
3165 .text
3166
3167 ENTRY(__copy_to_user_std)
3168-WEAK(__copy_to_user)
3169+WEAK(___copy_to_user)
3170
3171 #include "copy_template.S"
3172
3173-ENDPROC(__copy_to_user)
3174+ENDPROC(___copy_to_user)
3175 ENDPROC(__copy_to_user_std)
3176
3177 .pushsection .fixup,"ax"
3178diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3179index 7d08b43..f7ca7ea 100644
3180--- a/arch/arm/lib/csumpartialcopyuser.S
3181+++ b/arch/arm/lib/csumpartialcopyuser.S
3182@@ -57,8 +57,8 @@
3183 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3184 */
3185
3186-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3187-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3188+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3189+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3190
3191 #include "csumpartialcopygeneric.S"
3192
3193diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3194index 0dc5385..45833ef 100644
3195--- a/arch/arm/lib/delay.c
3196+++ b/arch/arm/lib/delay.c
3197@@ -28,12 +28,14 @@
3198 /*
3199 * Default to the loop-based delay implementation.
3200 */
3201-struct arm_delay_ops arm_delay_ops = {
3202+static struct arm_delay_ops arm_loop_delay_ops = {
3203 .delay = __loop_delay,
3204 .const_udelay = __loop_const_udelay,
3205 .udelay = __loop_udelay,
3206 };
3207
3208+struct arm_delay_ops *arm_delay_ops __read_only = &arm_loop_delay_ops;
3209+
3210 static const struct delay_timer *delay_timer;
3211 static bool delay_calibrated;
3212
3213@@ -67,6 +69,12 @@ static void __timer_udelay(unsigned long usecs)
3214 __timer_const_udelay(usecs * UDELAY_MULT);
3215 }
3216
3217+static struct arm_delay_ops arm_timer_delay_ops = {
3218+ .delay = __timer_delay,
3219+ .const_udelay = __timer_const_udelay,
3220+ .udelay = __timer_udelay,
3221+};
3222+
3223 void __init register_current_timer_delay(const struct delay_timer *timer)
3224 {
3225 if (!delay_calibrated) {
3226@@ -74,9 +82,7 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
3227 delay_timer = timer;
3228 lpj_fine = timer->freq / HZ;
3229 loops_per_jiffy = lpj_fine;
3230- arm_delay_ops.delay = __timer_delay;
3231- arm_delay_ops.const_udelay = __timer_const_udelay;
3232- arm_delay_ops.udelay = __timer_udelay;
3233+ arm_delay_ops = &arm_timer_delay_ops;
3234 delay_calibrated = true;
3235 } else {
3236 pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
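
The delay.c rework replaces field-by-field runtime edits of one writable ops struct with two fully-initialized static tables and a single __read_only pointer swap, shrinking the forever-writable state to one word. The same pattern in miniature:

```c
#include <stdio.h>

struct ops { void (*run)(void); };

static void loop_run(void)  { puts("loop-based delay"); }
static void timer_run(void) { puts("timer-based delay"); }

static const struct ops loop_ops  = { .run = loop_run };
static const struct ops timer_ops = { .run = timer_run };

/* in the kernel this one pointer is __read_only after init */
static const struct ops *cur_ops = &loop_ops;

int main(void)
{
    cur_ops->run();
    cur_ops = &timer_ops;   /* a timer was registered during calibration */
    cur_ops->run();
    return 0;
}
```
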
3237diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3238index 025f742..8432b08 100644
3239--- a/arch/arm/lib/uaccess_with_memcpy.c
3240+++ b/arch/arm/lib/uaccess_with_memcpy.c
3241@@ -104,7 +104,7 @@ out:
3242 }
3243
3244 unsigned long
3245-__copy_to_user(void __user *to, const void *from, unsigned long n)
3246+___copy_to_user(void __user *to, const void *from, unsigned long n)
3247 {
3248 /*
3249 * This test is stubbed out of the main function above to keep
3250diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3251index bac21a5..b67ef8e 100644
3252--- a/arch/arm/mach-kirkwood/common.c
3253+++ b/arch/arm/mach-kirkwood/common.c
3254@@ -150,7 +150,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3255 clk_gate_ops.disable(hw);
3256 }
3257
3258-static struct clk_ops clk_gate_fn_ops;
3259+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3260+{
3261+ return clk_gate_ops.is_enabled(hw);
3262+}
3263+
3264+static struct clk_ops clk_gate_fn_ops = {
3265+ .enable = clk_gate_fn_enable,
3266+ .disable = clk_gate_fn_disable,
3267+ .is_enabled = clk_gate_fn_is_enabled,
3268+};
3269
3270 static struct clk __init *clk_register_gate_fn(struct device *dev,
3271 const char *name,
3272@@ -184,14 +193,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3273 gate_fn->fn_en = fn_en;
3274 gate_fn->fn_dis = fn_dis;
3275
3276- /* ops is the gate ops, but with our enable/disable functions */
3277- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3278- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3279- clk_gate_fn_ops = clk_gate_ops;
3280- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3281- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3282- }
3283-
3284 clk = clk_register(dev, &gate_fn->gate.hw);
3285
3286 if (IS_ERR(clk))
3287diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3288index 0abb30f..54064da 100644
3289--- a/arch/arm/mach-omap2/board-n8x0.c
3290+++ b/arch/arm/mach-omap2/board-n8x0.c
3291@@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3292 }
3293 #endif
3294
3295-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3296+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3297 .late_init = n8x0_menelaus_late_init,
3298 };
3299
3300diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3301index 5d3b4f4..ddba3c0 100644
3302--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3303+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3304@@ -340,7 +340,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
3305 return NOTIFY_OK;
3306 }
3307
3308-static struct notifier_block __refdata irq_hotplug_notifier = {
3309+static struct notifier_block irq_hotplug_notifier = {
3310 .notifier_call = irq_cpu_hotplug_notify,
3311 };
3312
3313diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3314index 4653efb..8c60bf7 100644
3315--- a/arch/arm/mach-omap2/omap_hwmod.c
3316+++ b/arch/arm/mach-omap2/omap_hwmod.c
3317@@ -189,10 +189,10 @@ struct omap_hwmod_soc_ops {
3318 int (*init_clkdm)(struct omap_hwmod *oh);
3319 void (*update_context_lost)(struct omap_hwmod *oh);
3320 int (*get_context_lost)(struct omap_hwmod *oh);
3321-};
3322+} __no_const;
3323
3324 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3325-static struct omap_hwmod_soc_ops soc_ops;
3326+static struct omap_hwmod_soc_ops soc_ops __read_only;
3327
3328 /* omap_hwmod_list contains all registered struct omap_hwmods */
3329 static LIST_HEAD(omap_hwmod_list);
3330diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3331index 7c2b4ed..b2ea51f 100644
3332--- a/arch/arm/mach-omap2/wd_timer.c
3333+++ b/arch/arm/mach-omap2/wd_timer.c
3334@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3335 struct omap_hwmod *oh;
3336 char *oh_name = "wd_timer2";
3337 char *dev_name = "omap_wdt";
3338- struct omap_wd_timer_platform_data pdata;
3339+ static struct omap_wd_timer_platform_data pdata = {
3340+ .read_reset_sources = prm_read_reset_sources
3341+ };
3342
3343 if (!cpu_class_is_omap2() || of_have_populated_dt())
3344 return 0;
3345@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3346 return -EINVAL;
3347 }
3348
3349- pdata.read_reset_sources = prm_read_reset_sources;
3350-
3351 pdev = omap_device_build(dev_name, id, oh, &pdata,
3352 sizeof(struct omap_wd_timer_platform_data),
3353 NULL, 0, 0);
3354diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
3355index 6be4c4d..32ac32a 100644
3356--- a/arch/arm/mach-ux500/include/mach/setup.h
3357+++ b/arch/arm/mach-ux500/include/mach/setup.h
3358@@ -38,13 +38,6 @@ extern struct sys_timer ux500_timer;
3359 .type = MT_DEVICE, \
3360 }
3361
3362-#define __MEM_DEV_DESC(x, sz) { \
3363- .virtual = IO_ADDRESS(x), \
3364- .pfn = __phys_to_pfn(x), \
3365- .length = sz, \
3366- .type = MT_MEMORY, \
3367-}
3368-
3369 extern struct smp_operations ux500_smp_ops;
3370 extern void ux500_cpu_die(unsigned int cpu);
3371
3372diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3373index 3fd629d..8b1aca9 100644
3374--- a/arch/arm/mm/Kconfig
3375+++ b/arch/arm/mm/Kconfig
3376@@ -425,7 +425,7 @@ config CPU_32v5
3377
3378 config CPU_32v6
3379 bool
3380- select CPU_USE_DOMAINS if CPU_V6 && MMU
3381+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC
3382 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3383
3384 config CPU_32v6K
3385@@ -577,6 +577,7 @@ config CPU_CP15_MPU
3386
3387 config CPU_USE_DOMAINS
3388 bool
3389+ depends on !ARM_LPAE && !PAX_KERNEXEC
3390 help
3391 This option enables or disables the use of domain switching
3392 via the set_fs() function.
3393diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3394index 5dbf13f..6393f55 100644
3395--- a/arch/arm/mm/fault.c
3396+++ b/arch/arm/mm/fault.c
3397@@ -25,6 +25,7 @@
3398 #include <asm/system_misc.h>
3399 #include <asm/system_info.h>
3400 #include <asm/tlbflush.h>
3401+#include <asm/sections.h>
3402
3403 #include "fault.h"
3404
3405@@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3406 if (fixup_exception(regs))
3407 return;
3408
3409+#ifdef CONFIG_PAX_KERNEXEC
3410+ if ((fsr & FSR_WRITE) &&
3411+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3412+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3413+ {
3414+ if (current->signal->curr_ip)
3415+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3416+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
3417+ else
3418+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3419+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
3420+ }
3421+#endif
3422+
3423 /*
3424 * No handler, we'll have to terminate things with extreme prejudice.
3425 */
3426@@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3427 }
3428 #endif
3429
3430+#ifdef CONFIG_PAX_PAGEEXEC
3431+ if (fsr & FSR_LNX_PF) {
3432+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3433+ do_group_exit(SIGKILL);
3434+ }
3435+#endif
3436+
3437 tsk->thread.address = addr;
3438 tsk->thread.error_code = fsr;
3439 tsk->thread.trap_no = 14;
3440@@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3441 }
3442 #endif /* CONFIG_MMU */
3443
3444+#ifdef CONFIG_PAX_PAGEEXEC
3445+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3446+{
3447+ long i;
3448+
3449+ printk(KERN_ERR "PAX: bytes at PC: ");
3450+ for (i = 0; i < 20; i++) {
3451+ unsigned char c;
3452+ if (get_user(c, (__force unsigned char __user *)pc+i))
3453+ printk(KERN_CONT "?? ");
3454+ else
3455+ printk(KERN_CONT "%02x ", c);
3456+ }
3457+ printk("\n");
3458+
3459+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3460+ for (i = -1; i < 20; i++) {
3461+ unsigned long c;
3462+ if (get_user(c, (__force unsigned long __user *)sp+i))
3463+ printk(KERN_CONT "???????? ");
3464+ else
3465+ printk(KERN_CONT "%08lx ", c);
3466+ }
3467+ printk("\n");
3468+}
3469+#endif
3470+
3471 /*
3472 * First Level Translation Fault Handler
3473 *
3474@@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3475 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3476 struct siginfo info;
3477
3478+#ifdef CONFIG_PAX_MEMORY_UDEREF
3479+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3480+ if (current->signal->curr_ip)
3481+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3482+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()), addr);
3483+ else
3484+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3485+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()), addr);
3486+ goto die;
3487+ }
3488+#endif
3489+
3490 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3491 return;
3492
3493+die:
3494 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3495 inf->name, fsr, addr);
3496
3497@@ -575,9 +637,38 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3498 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3499 struct siginfo info;
3500
3501+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3502+ if (!user_mode(regs) && (is_domain_fault(ifsr) || is_xn_fault(ifsr))) {
3503+ if (current->signal->curr_ip)
3504+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3505+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()),
3506+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3507+ else
3508+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
3509+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()),
3510+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3511+ goto die;
3512+ }
3513+#endif
3514+
3515+#ifdef CONFIG_PAX_REFCOUNT
3516+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
3517+ unsigned int bkpt;
3518+
3519+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
3520+ current->thread.error_code = ifsr;
3521+ current->thread.trap_no = 0;
3522+ pax_report_refcount_overflow(regs);
3523+ fixup_exception(regs);
3524+ return;
3525+ }
3526+ }
3527+#endif
3528+
3529 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
3530 return;
3531
3532+die:
3533 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
3534 inf->name, ifsr, addr);
3535
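
The pax_report_insns() handler added in this file dumps the bytes around the faulting PC and SP when a PAGEEXEC violation kills a task, so the attempted payload shows up in the log. A user-space sketch of the dump loop, with read_byte() standing in for get_user() (the names here are illustrative, not kernel API; in the kernel the fault tolerance comes from the exception tables):

    /* minimal sketch of the PC dump loop in pax_report_insns() */
    #include <stdio.h>

    static int read_byte(const unsigned char *p, unsigned char *out)
    {
        *out = *p;  /* every address in this sketch is readable */
        return 0;   /* nonzero would mean "access faulted" */
    }

    int main(void)
    {
        const unsigned char code[20] = { 0xde, 0xad, 0xbe, 0xef };
        long i;

        printf("PAX: bytes at PC: ");
        for (i = 0; i < 20; i++) {
            unsigned char c;

            if (read_byte(code + i, &c))
                printf("?? ");
            else
                printf("%02x ", c);
        }
        printf("\n");
        return 0;
    }
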
3536diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
3537index cf08bdf..772656c 100644
3538--- a/arch/arm/mm/fault.h
3539+++ b/arch/arm/mm/fault.h
3540@@ -3,6 +3,7 @@
3541
3542 /*
3543 * Fault status register encodings. We steal bit 31 for our own purposes.
3544+ * Set when the FSR value is from an instruction fault.
3545 */
3546 #define FSR_LNX_PF (1 << 31)
3547 #define FSR_WRITE (1 << 11)
3548@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
3549 }
3550 #endif
3551
3552+/* valid for LPAE and !LPAE */
3553+static inline int is_xn_fault(unsigned int fsr)
3554+{
3555+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
3556+}
3557+
3558+static inline int is_domain_fault(unsigned int fsr)
3559+{
3560+ return ((fsr_fs(fsr) & 0xD) == 0x9);
3561+}
3562+
3563 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
3564 unsigned long search_exception_table(unsigned long addr);
3565
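
fsr_fs(), defined earlier in this header, reassembles the fault-status field from FSR bits [3:0] and bit 10 on non-LPAE ARM; the two predicates added above then classify the status with simple masks. A hedged stand-alone sketch, assuming the 3.8 non-LPAE constants from this header (which codes mean what is defined by the ARM ARM):

    #include <stdio.h>

    #define FSR_FS4   (1 << 10)
    #define FSR_FS3_0 (15)

    static int fsr_fs(unsigned int fsr)
    {
        return (fsr & FSR_FS3_0) | ((fsr & FSR_FS4) >> 6);
    }

    static int is_xn_fault(unsigned int fsr)
    {
        return (fsr_fs(fsr) & 0x3c) == 0xc;
    }

    static int is_domain_fault(unsigned int fsr)
    {
        return (fsr_fs(fsr) & 0xd) == 0x9;
    }

    int main(void)
    {
        unsigned int fsr = 0x00d;  /* example status value */

        printf("fs=%#x xn=%d domain=%d\n",
               fsr_fs(fsr), is_xn_fault(fsr), is_domain_fault(fsr));
        return 0;
    }
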
3566diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
3567index ad722f1..763fdd3 100644
3568--- a/arch/arm/mm/init.c
3569+++ b/arch/arm/mm/init.c
3570@@ -30,6 +30,8 @@
3571 #include <asm/setup.h>
3572 #include <asm/tlb.h>
3573 #include <asm/fixmap.h>
3574+#include <asm/system_info.h>
3575+#include <asm/cp15.h>
3576
3577 #include <asm/mach/arch.h>
3578 #include <asm/mach/map.h>
3579@@ -736,7 +738,46 @@ void free_initmem(void)
3580 {
3581 #ifdef CONFIG_HAVE_TCM
3582 extern char __tcm_start, __tcm_end;
3583+#endif
3584
3585+#ifdef CONFIG_PAX_KERNEXEC
3586+ unsigned long addr;
3587+ pgd_t *pgd;
3588+ pud_t *pud;
3589+ pmd_t *pmd;
3590+ int cpu_arch = cpu_architecture();
3591+ unsigned int cr = get_cr();
3592+
3593+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
3594+ /* make page tables, etc., before .text NX */
3595+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
3596+ pgd = pgd_offset_k(addr);
3597+ pud = pud_offset(pgd, addr);
3598+ pmd = pmd_offset(pud, addr);
3599+ __section_update(pmd, addr, PMD_SECT_XN);
3600+ }
3601+ /* make init NX */
3602+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
3603+ pgd = pgd_offset_k(addr);
3604+ pud = pud_offset(pgd, addr);
3605+ pmd = pmd_offset(pud, addr);
3606+ __section_update(pmd, addr, PMD_SECT_XN);
3607+ }
3608+ /* make kernel code/rodata RX */
3609+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
3610+ pgd = pgd_offset_k(addr);
3611+ pud = pud_offset(pgd, addr);
3612+ pmd = pmd_offset(pud, addr);
3613+#ifdef CONFIG_ARM_LPAE
3614+ __section_update(pmd, addr, PMD_SECT_RDONLY);
3615+#else
3616+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
3617+#endif
3618+ }
3619+ }
3620+#endif
3621+
3622+#ifdef CONFIG_HAVE_TCM
3623 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
3624 totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
3625 __phys_to_pfn(__pa(&__tcm_end)),
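
The KERNEXEC block added to free_initmem() is three instances of the same walk: advance one SECTION_SIZE per step, resolve the pgd/pud/pmd for that address, and flip permission bits on the section. Schematic sketch, with apply_update() as a stand-in for the __section_update() helper the patch relies on and the page-table lookups omitted:

    #include <stdio.h>

    #define SECTION_SIZE (1UL << 20)  /* 1 MiB sections, non-LPAE */

    static void apply_update(unsigned long addr, unsigned long prot)
    {
        printf("section %#010lx: set %#lx\n", addr, prot);
    }

    static void update_range(unsigned long start, unsigned long end,
                             unsigned long prot)
    {
        unsigned long addr;

        for (addr = start; addr < end; addr += SECTION_SIZE)
            apply_update(addr, prot);
    }

    int main(void)
    {
        /* hypothetical layout: lowmem before _stext made XN */
        update_range(0xc0000000UL, 0xc0200000UL, 1UL << 4 /* PMD_SECT_XN */);
        return 0;
    }
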
3626diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
3627index 88fd86c..7a224ce 100644
3628--- a/arch/arm/mm/ioremap.c
3629+++ b/arch/arm/mm/ioremap.c
3630@@ -335,9 +335,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
3631 unsigned int mtype;
3632
3633 if (cached)
3634- mtype = MT_MEMORY;
3635+ mtype = MT_MEMORY_RX;
3636 else
3637- mtype = MT_MEMORY_NONCACHED;
3638+ mtype = MT_MEMORY_NONCACHED_RX;
3639
3640 return __arm_ioremap_caller(phys_addr, size, mtype,
3641 __builtin_return_address(0));
3642diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
3643index 10062ce..aa96dd7 100644
3644--- a/arch/arm/mm/mmap.c
3645+++ b/arch/arm/mm/mmap.c
3646@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3647 struct vm_area_struct *vma;
3648 int do_align = 0;
3649 int aliasing = cache_is_vipt_aliasing();
3650+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3651 struct vm_unmapped_area_info info;
3652
3653 /*
3654@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3655 if (len > TASK_SIZE)
3656 return -ENOMEM;
3657
3658+#ifdef CONFIG_PAX_RANDMMAP
3659+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3660+#endif
3661+
3662 if (addr) {
3663 if (do_align)
3664 addr = COLOUR_ALIGN(addr, pgoff);
3665@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3666 addr = PAGE_ALIGN(addr);
3667
3668 vma = find_vma(mm, addr);
3669- if (TASK_SIZE - len >= addr &&
3670- (!vma || addr + len <= vma->vm_start))
3671+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
3672 return addr;
3673 }
3674
3675@@ -112,6 +116,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3676 unsigned long addr = addr0;
3677 int do_align = 0;
3678 int aliasing = cache_is_vipt_aliasing();
3679+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3680 struct vm_unmapped_area_info info;
3681
3682 /*
3683@@ -132,6 +137,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3684 return addr;
3685 }
3686
3687+#ifdef CONFIG_PAX_RANDMMAP
3688+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3689+#endif
3690+
3691 /* requesting a specific address */
3692 if (addr) {
3693 if (do_align)
3694@@ -139,8 +148,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3695 else
3696 addr = PAGE_ALIGN(addr);
3697 vma = find_vma(mm, addr);
3698- if (TASK_SIZE - len >= addr &&
3699- (!vma || addr + len <= vma->vm_start))
3700+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
3701 return addr;
3702 }
3703
3704@@ -162,6 +170,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3705 VM_BUG_ON(addr != -ENOMEM);
3706 info.flags = 0;
3707 info.low_limit = mm->mmap_base;
3708+
3709+#ifdef CONFIG_PAX_RANDMMAP
3710+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3711+ info.low_limit += mm->delta_mmap;
3712+#endif
3713+
3714 info.high_limit = TASK_SIZE;
3715 addr = vm_unmapped_area(&info);
3716 }
3717@@ -173,6 +187,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3718 {
3719 unsigned long random_factor = 0UL;
3720
3721+#ifdef CONFIG_PAX_RANDMMAP
3722+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3723+#endif
3724+
3725 /* 8 bits of randomness in 20 address space bits */
3726 if ((current->flags & PF_RANDOMIZE) &&
3727 !(current->personality & ADDR_NO_RANDOMIZE))
3728@@ -180,10 +198,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3729
3730 if (mmap_is_legacy()) {
3731 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3732+
3733+#ifdef CONFIG_PAX_RANDMMAP
3734+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3735+ mm->mmap_base += mm->delta_mmap;
3736+#endif
3737+
3738 mm->get_unmapped_area = arch_get_unmapped_area;
3739 mm->unmap_area = arch_unmap_area;
3740 } else {
3741 mm->mmap_base = mmap_base(random_factor);
3742+
3743+#ifdef CONFIG_PAX_RANDMMAP
3744+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3745+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3746+#endif
3747+
3748 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3749 mm->unmap_area = arch_unmap_area_topdown;
3750 }
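
check_heap_stack_gap() tightens the old "addr + len <= vma->vm_start" test by also requiring a gap (the gr_rand_threadstack_offset() value for thread stacks) before the next mapping. A simplified model of the core comparison, not the real kernel helper, which also handles growsdown stacks:

    #include <stdbool.h>
    #include <stdio.h>

    struct vma { unsigned long vm_start, vm_end; };

    static bool gap_ok(const struct vma *next, unsigned long addr,
                       unsigned long len, unsigned long gap)
    {
        if (!next)   /* no mapping above the candidate range */
            return true;
        return addr + len + gap <= next->vm_start;
    }

    int main(void)
    {
        struct vma stack = { 0xbf000000UL, 0xc0000000UL };

        printf("%d\n", gap_ok(&stack, 0xbe000000UL, 0x100000UL, 0x10000UL));
        printf("%d\n", gap_ok(&stack, 0xbef00000UL, 0x100000UL, 0x10000UL));
        return 0;
    }
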
3751diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
3752index ce328c7..f82bebb 100644
3753--- a/arch/arm/mm/mmu.c
3754+++ b/arch/arm/mm/mmu.c
3755@@ -35,6 +35,23 @@
3756
3757 #include "mm.h"
3758
3759+
3760+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3761+void modify_domain(unsigned int dom, unsigned int type)
3762+{
3763+ struct thread_info *thread = current_thread_info();
3764+ unsigned int domain = thread->cpu_domain;
3765+ /*
3766+ * DOMAIN_MANAGER might be defined to some other value,
3767+ * use the arch-defined constant
3768+ */
3769+ domain &= ~domain_val(dom, 3);
3770+ thread->cpu_domain = domain | domain_val(dom, type);
3771+ set_domain(thread->cpu_domain);
3772+}
3773+EXPORT_SYMBOL(modify_domain);
3774+#endif
3775+
3776 /*
3777 * empty_zero_page is a special page that is used for
3778 * zero-initialized data and COW.
3779@@ -195,10 +212,18 @@ void adjust_cr(unsigned long mask, unsigned long set)
3780 }
3781 #endif
3782
3783-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
3784+#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
3785 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
3786
3787-static struct mem_type mem_types[] = {
3788+#ifdef CONFIG_PAX_KERNEXEC
3789+#define L_PTE_KERNEXEC L_PTE_RDONLY
3790+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
3791+#else
3792+#define L_PTE_KERNEXEC L_PTE_DIRTY
3793+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
3794+#endif
3795+
3796+static struct mem_type mem_types[] __read_only = {
3797 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
3798 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
3799 L_PTE_SHARED,
3800@@ -227,16 +252,16 @@ static struct mem_type mem_types[] = {
3801 [MT_UNCACHED] = {
3802 .prot_pte = PROT_PTE_DEVICE,
3803 .prot_l1 = PMD_TYPE_TABLE,
3804- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
3805+ .prot_sect = PROT_SECT_DEVICE,
3806 .domain = DOMAIN_IO,
3807 },
3808 [MT_CACHECLEAN] = {
3809- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
3810+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
3811 .domain = DOMAIN_KERNEL,
3812 },
3813 #ifndef CONFIG_ARM_LPAE
3814 [MT_MINICLEAN] = {
3815- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
3816+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
3817 .domain = DOMAIN_KERNEL,
3818 },
3819 #endif
3820@@ -244,36 +269,54 @@ static struct mem_type mem_types[] = {
3821 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
3822 L_PTE_RDONLY,
3823 .prot_l1 = PMD_TYPE_TABLE,
3824- .domain = DOMAIN_USER,
3825+ .domain = DOMAIN_VECTORS,
3826 },
3827 [MT_HIGH_VECTORS] = {
3828 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
3829 L_PTE_USER | L_PTE_RDONLY,
3830 .prot_l1 = PMD_TYPE_TABLE,
3831- .domain = DOMAIN_USER,
3832+ .domain = DOMAIN_VECTORS,
3833 },
3834- [MT_MEMORY] = {
3835+ [MT_MEMORY_RWX] = {
3836 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
3837 .prot_l1 = PMD_TYPE_TABLE,
3838 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
3839 .domain = DOMAIN_KERNEL,
3840 },
3841+ [MT_MEMORY_RW] = {
3842+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
3843+ .prot_l1 = PMD_TYPE_TABLE,
3844+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
3845+ .domain = DOMAIN_KERNEL,
3846+ },
3847+ [MT_MEMORY_RX] = {
3848+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
3849+ .prot_l1 = PMD_TYPE_TABLE,
3850+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
3851+ .domain = DOMAIN_KERNEL,
3852+ },
3853 [MT_ROM] = {
3854- .prot_sect = PMD_TYPE_SECT,
3855+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
3856 .domain = DOMAIN_KERNEL,
3857 },
3858- [MT_MEMORY_NONCACHED] = {
3859+ [MT_MEMORY_NONCACHED_RW] = {
3860 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
3861 L_PTE_MT_BUFFERABLE,
3862 .prot_l1 = PMD_TYPE_TABLE,
3863 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
3864 .domain = DOMAIN_KERNEL,
3865 },
3866+ [MT_MEMORY_NONCACHED_RX] = {
3867+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
3868+ L_PTE_MT_BUFFERABLE,
3869+ .prot_l1 = PMD_TYPE_TABLE,
3870+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
3871+ .domain = DOMAIN_KERNEL,
3872+ },
3873 [MT_MEMORY_DTCM] = {
3874- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
3875- L_PTE_XN,
3876+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
3877 .prot_l1 = PMD_TYPE_TABLE,
3878- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
3879+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
3880 .domain = DOMAIN_KERNEL,
3881 },
3882 [MT_MEMORY_ITCM] = {
3883@@ -283,10 +326,10 @@ static struct mem_type mem_types[] = {
3884 },
3885 [MT_MEMORY_SO] = {
3886 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
3887- L_PTE_MT_UNCACHED | L_PTE_XN,
3888+ L_PTE_MT_UNCACHED,
3889 .prot_l1 = PMD_TYPE_TABLE,
3890 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
3891- PMD_SECT_UNCACHED | PMD_SECT_XN,
3892+ PMD_SECT_UNCACHED,
3893 .domain = DOMAIN_KERNEL,
3894 },
3895 [MT_MEMORY_DMA_READY] = {
3896@@ -371,9 +414,35 @@ static void __init build_mem_type_table(void)
3897 * to prevent speculative instruction fetches.
3898 */
3899 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
3900+ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
3901 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
3902+ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
3903 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
3904+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
3905 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
3906+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
3907+
3908+ /* Mark other regions on ARMv6+ as execute-never */
3909+
3910+#ifdef CONFIG_PAX_KERNEXEC
3911+ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
3912+ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
3913+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
3914+ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
3915+#ifndef CONFIG_ARM_LPAE
3916+ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
3917+ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
3918+#endif
3919+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
3920+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
3921+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
3922+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_XN;
3923+ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
3924+ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
3925+#endif
3926+
3927+ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
3928+ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
3929 }
3930 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
3931 /*
3932@@ -432,6 +501,9 @@ static void __init build_mem_type_table(void)
3933 * from SVC mode and no access from userspace.
3934 */
3935 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
3936+#ifdef CONFIG_PAX_KERNEXEC
3937+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
3938+#endif
3939 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
3940 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
3941 #endif
3942@@ -448,11 +520,17 @@ static void __init build_mem_type_table(void)
3943 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
3944 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
3945 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
3946- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
3947- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
3948+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
3949+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
3950+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
3951+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
3952+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
3953+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
3954 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
3955- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
3956- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
3957+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
3958+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
3959+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
3960+ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
3961 }
3962 }
3963
3964@@ -463,15 +541,20 @@ static void __init build_mem_type_table(void)
3965 if (cpu_arch >= CPU_ARCH_ARMv6) {
3966 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
3967 /* Non-cacheable Normal is XCB = 001 */
3968- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
3969+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
3970+ PMD_SECT_BUFFERED;
3971+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
3972 PMD_SECT_BUFFERED;
3973 } else {
3974 /* For both ARMv6 and non-TEX-remapping ARMv7 */
3975- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
3976+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
3977+ PMD_SECT_TEX(1);
3978+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
3979 PMD_SECT_TEX(1);
3980 }
3981 } else {
3982- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
3983+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
3984+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
3985 }
3986
3987 #ifdef CONFIG_ARM_LPAE
3988@@ -487,6 +570,8 @@ static void __init build_mem_type_table(void)
3989 vecs_pgprot |= PTE_EXT_AF;
3990 #endif
3991
3992+ user_pgprot |= __supported_pte_mask;
3993+
3994 for (i = 0; i < 16; i++) {
3995 pteval_t v = pgprot_val(protection_map[i]);
3996 protection_map[i] = __pgprot(v | user_pgprot);
3997@@ -501,10 +586,15 @@ static void __init build_mem_type_table(void)
3998
3999 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4000 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4001- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4002- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4003+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4004+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4005+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4006+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4007+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4008+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4009 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4010- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4011+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4012+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4013 mem_types[MT_ROM].prot_sect |= cp->pmd;
4014
4015 switch (cp->pmd) {
4016@@ -1105,18 +1195,15 @@ void __init arm_mm_memblock_reserve(void)
4017 * called function. This means you can't use any function or debugging
4018 * method which may touch any device, otherwise the kernel _will_ crash.
4019 */
4020+
4021+static char vectors[PAGE_SIZE] __read_only __aligned(PAGE_SIZE);
4022+
4023 static void __init devicemaps_init(struct machine_desc *mdesc)
4024 {
4025 struct map_desc map;
4026 unsigned long addr;
4027- void *vectors;
4028
4029- /*
4030- * Allocate the vector page early.
4031- */
4032- vectors = early_alloc(PAGE_SIZE);
4033-
4034- early_trap_init(vectors);
4035+ early_trap_init(&vectors);
4036
4037 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4038 pmd_clear(pmd_off_k(addr));
4039@@ -1156,7 +1243,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
4040 * location (0xffff0000). If we aren't using high-vectors, also
4041 * create a mapping at the low-vectors virtual address.
4042 */
4043- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4044+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4045 map.virtual = 0xffff0000;
4046 map.length = PAGE_SIZE;
4047 map.type = MT_HIGH_VECTORS;
4048@@ -1214,8 +1301,39 @@ static void __init map_lowmem(void)
4049 map.pfn = __phys_to_pfn(start);
4050 map.virtual = __phys_to_virt(start);
4051 map.length = end - start;
4052- map.type = MT_MEMORY;
4053
4054+#ifdef CONFIG_PAX_KERNEXEC
4055+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4056+ struct map_desc kernel;
4057+ struct map_desc initmap;
4058+
4059+ /* when freeing initmem we will make this RW */
4060+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4061+ initmap.virtual = (unsigned long)__init_begin;
4062+ initmap.length = _sdata - __init_begin;
4063+ initmap.type = MT_MEMORY_RWX;
4064+ create_mapping(&initmap);
4065+
4066+ /* when freeing initmem we will make this RX */
4067+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4068+ kernel.virtual = (unsigned long)_stext;
4069+ kernel.length = __init_begin - _stext;
4070+ kernel.type = MT_MEMORY_RWX;
4071+ create_mapping(&kernel);
4072+
4073+ if (map.virtual < (unsigned long)_stext) {
4074+ map.length = (unsigned long)_stext - map.virtual;
4075+ map.type = MT_MEMORY_RWX;
4076+ create_mapping(&map);
4077+ }
4078+
4079+ map.pfn = __phys_to_pfn(__pa(_sdata));
4080+ map.virtual = (unsigned long)_sdata;
4081+ map.length = end - __pa(_sdata);
4082+ }
4083+#endif
4084+
4085+ map.type = MT_MEMORY_RW;
4086 create_mapping(&map);
4087 }
4088 }
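
modify_domain(), added near the top of this file, edits one 2-bit field of the cached domain access control value and writes it back. A self-contained illustration of the bit manipulation, with the CP15 write (set_domain()) stubbed out:

    #include <stdio.h>

    #define domain_val(dom, type) ((type) << (2 * (dom)))

    static unsigned int modify_domain(unsigned int dacr, unsigned int dom,
                                      unsigned int type)
    {
        dacr &= ~domain_val(dom, 3);    /* clear the 2-bit field */
        dacr |= domain_val(dom, type);  /* install the new access type */
        return dacr;
    }

    int main(void)
    {
        unsigned int dacr = 0x55555555; /* all 16 domains = client (01) */

        dacr = modify_domain(dacr, 1, 3); /* domain 1 -> manager (11) */
        printf("DACR = %#010x\n", dacr);
        return 0;
    }
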
4089diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
4090index 6d98c13..3cfb174 100644
4091--- a/arch/arm/mm/proc-v7-2level.S
4092+++ b/arch/arm/mm/proc-v7-2level.S
4093@@ -99,6 +99,9 @@ ENTRY(cpu_v7_set_pte_ext)
4094 tst r1, #L_PTE_XN
4095 orrne r3, r3, #PTE_EXT_XN
4096
4097+ tst r1, #L_PTE_PXN
4098+ orrne r3, r3, #PTE_EXT_PXN
4099+
4100 tst r1, #L_PTE_YOUNG
4101 tstne r1, #L_PTE_VALID
4102 #ifndef CONFIG_CPU_USE_DOMAINS
4103diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4104index a5bc92d..0bb4730 100644
4105--- a/arch/arm/plat-omap/sram.c
4106+++ b/arch/arm/plat-omap/sram.c
4107@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4108 * Looks like we need to preserve some bootloader code at the
4109 * beginning of SRAM for jumping to flash for reboot to work...
4110 */
4111+ pax_open_kernel();
4112 memset_io(omap_sram_base + omap_sram_skip, 0,
4113 omap_sram_size - omap_sram_skip);
4114+ pax_close_kernel();
4115 }
4116diff --git a/arch/arm/plat-orion/include/plat/addr-map.h b/arch/arm/plat-orion/include/plat/addr-map.h
4117index b76c065..b6e766b 100644
4118--- a/arch/arm/plat-orion/include/plat/addr-map.h
4119+++ b/arch/arm/plat-orion/include/plat/addr-map.h
4120@@ -27,7 +27,7 @@ struct orion_addr_map_cfg {
4121 value in bridge_virt_base */
4122 void __iomem *(*win_cfg_base) (const struct orion_addr_map_cfg *cfg,
4123 const int win);
4124-};
4125+} __no_const;
4126
4127 /*
4128 * Information needed to setup one address mapping.
4129diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4130index f5144cd..71f6d1f 100644
4131--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4132+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4133@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4134 int (*started)(unsigned ch);
4135 int (*flush)(unsigned ch);
4136 int (*stop)(unsigned ch);
4137-};
4138+} __no_const;
4139
4140 extern void *samsung_dmadev_get_ops(void);
4141 extern void *s3c_dma_get_ops(void);
4142diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
4143index 0c3ba9f..95722b3 100644
4144--- a/arch/arm64/kernel/debug-monitors.c
4145+++ b/arch/arm64/kernel/debug-monitors.c
4146@@ -151,7 +151,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
4147 return NOTIFY_OK;
4148 }
4149
4150-static struct notifier_block __cpuinitdata os_lock_nb = {
4151+static struct notifier_block os_lock_nb = {
4152 .notifier_call = os_lock_notify,
4153 };
4154
4155diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
4156index 5ab825c..96aaec8 100644
4157--- a/arch/arm64/kernel/hw_breakpoint.c
4158+++ b/arch/arm64/kernel/hw_breakpoint.c
4159@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
4160 return NOTIFY_OK;
4161 }
4162
4163-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
4164+static struct notifier_block hw_breakpoint_reset_nb = {
4165 .notifier_call = hw_breakpoint_reset_notify,
4166 };
4167
4168diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4169index c3a58a1..78fbf54 100644
4170--- a/arch/avr32/include/asm/cache.h
4171+++ b/arch/avr32/include/asm/cache.h
4172@@ -1,8 +1,10 @@
4173 #ifndef __ASM_AVR32_CACHE_H
4174 #define __ASM_AVR32_CACHE_H
4175
4176+#include <linux/const.h>
4177+
4178 #define L1_CACHE_SHIFT 5
4179-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4180+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4181
4182 /*
4183 * Memory returned by kmalloc() may be used for DMA, so we must make
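
The _AC(1,UL) change here (and repeated for each arch's cache.h below) exists because _AC() from include/uapi/linux/const.h drops the UL suffix under __ASSEMBLY__, so the same header stays usable from assembly while C code sees an unsigned long constant and mask arithmetic cannot truncate. Minimal demonstration:

    #include <stdio.h>

    #ifdef __ASSEMBLY__
    #define _AC(X, Y) X
    #else
    #define __AC(X, Y) (X##Y)
    #define _AC(X, Y)  __AC(X, Y)
    #endif

    #define L1_CACHE_SHIFT 5
    #define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

    int main(void)
    {
        /* mask computed in unsigned long, no int truncation */
        unsigned long mask = ~(L1_CACHE_BYTES - 1);

        printf("line=%lu mask=%#lx\n", (unsigned long)L1_CACHE_BYTES, mask);
        return 0;
    }
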
4184diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4185index e2c3287..6c4f98c 100644
4186--- a/arch/avr32/include/asm/elf.h
4187+++ b/arch/avr32/include/asm/elf.h
4188@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4189 the loader. We need to make sure that it is out of the way of the program
4190 that it will "exec", and that there is sufficient room for the brk. */
4191
4192-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4193+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4194
4195+#ifdef CONFIG_PAX_ASLR
4196+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4197+
4198+#define PAX_DELTA_MMAP_LEN 15
4199+#define PAX_DELTA_STACK_LEN 15
4200+#endif
4201
4202 /* This yields a mask that user programs can use to figure out what
4203 instruction set this CPU supports. This could be done in user space,
4204diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4205index 479330b..53717a8 100644
4206--- a/arch/avr32/include/asm/kmap_types.h
4207+++ b/arch/avr32/include/asm/kmap_types.h
4208@@ -2,9 +2,9 @@
4209 #define __ASM_AVR32_KMAP_TYPES_H
4210
4211 #ifdef CONFIG_DEBUG_HIGHMEM
4212-# define KM_TYPE_NR 29
4213+# define KM_TYPE_NR 30
4214 #else
4215-# define KM_TYPE_NR 14
4216+# define KM_TYPE_NR 15
4217 #endif
4218
4219 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4220diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4221index b2f2d2d..d1c85cb 100644
4222--- a/arch/avr32/mm/fault.c
4223+++ b/arch/avr32/mm/fault.c
4224@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4225
4226 int exception_trace = 1;
4227
4228+#ifdef CONFIG_PAX_PAGEEXEC
4229+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4230+{
4231+ unsigned long i;
4232+
4233+ printk(KERN_ERR "PAX: bytes at PC: ");
4234+ for (i = 0; i < 20; i++) {
4235+ unsigned char c;
4236+ if (get_user(c, (unsigned char *)pc+i))
4237+ printk(KERN_CONT "?? ");
4238+ else
4239+ printk(KERN_CONT "%02x ", c);
4240+ }
4241+ printk("\n");
4242+}
4243+#endif
4244+
4245 /*
4246 * This routine handles page faults. It determines the address and the
4247 * problem, and then passes it off to one of the appropriate routines.
4248@@ -174,6 +191,16 @@ bad_area:
4249 up_read(&mm->mmap_sem);
4250
4251 if (user_mode(regs)) {
4252+
4253+#ifdef CONFIG_PAX_PAGEEXEC
4254+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4255+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4256+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4257+ do_group_exit(SIGKILL);
4258+ }
4259+ }
4260+#endif
4261+
4262 if (exception_trace && printk_ratelimit())
4263 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4264 "sp %08lx ecr %lu\n",
4265diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4266index 568885a..f8008df 100644
4267--- a/arch/blackfin/include/asm/cache.h
4268+++ b/arch/blackfin/include/asm/cache.h
4269@@ -7,6 +7,7 @@
4270 #ifndef __ARCH_BLACKFIN_CACHE_H
4271 #define __ARCH_BLACKFIN_CACHE_H
4272
4273+#include <linux/const.h>
4274 #include <linux/linkage.h> /* for asmlinkage */
4275
4276 /*
4277@@ -14,7 +15,7 @@
4278 * Blackfin loads 32 bytes for cache
4279 */
4280 #define L1_CACHE_SHIFT 5
4281-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4282+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4283 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4284
4285 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4286diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4287index aea2718..3639a60 100644
4288--- a/arch/cris/include/arch-v10/arch/cache.h
4289+++ b/arch/cris/include/arch-v10/arch/cache.h
4290@@ -1,8 +1,9 @@
4291 #ifndef _ASM_ARCH_CACHE_H
4292 #define _ASM_ARCH_CACHE_H
4293
4294+#include <linux/const.h>
4295 /* Etrax 100LX have 32-byte cache-lines. */
4296-#define L1_CACHE_BYTES 32
4297 #define L1_CACHE_SHIFT 5
4298+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4299
4300 #endif /* _ASM_ARCH_CACHE_H */
4301diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4302index 7caf25d..ee65ac5 100644
4303--- a/arch/cris/include/arch-v32/arch/cache.h
4304+++ b/arch/cris/include/arch-v32/arch/cache.h
4305@@ -1,11 +1,12 @@
4306 #ifndef _ASM_CRIS_ARCH_CACHE_H
4307 #define _ASM_CRIS_ARCH_CACHE_H
4308
4309+#include <linux/const.h>
4310 #include <arch/hwregs/dma.h>
4311
4312 /* A cache-line is 32 bytes. */
4313-#define L1_CACHE_BYTES 32
4314 #define L1_CACHE_SHIFT 5
4315+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4316
4317 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4318
4319diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4320index b86329d..6709906 100644
4321--- a/arch/frv/include/asm/atomic.h
4322+++ b/arch/frv/include/asm/atomic.h
4323@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4324 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4325 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4326
4327+#define atomic64_read_unchecked(v) atomic64_read(v)
4328+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4329+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4330+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4331+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4332+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4333+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4334+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4335+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4336+
4337 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4338 {
4339 int c, old;
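
The *_unchecked aliases added above exist for PAX_REFCOUNT: the regular atomics trap on signed overflow, and counters that may legitimately wrap opt out via the _unchecked names; on frv there is no overflow instrumentation, so the aliases resolve to the plain ops. A hedged user-space sketch of the checked/unchecked split using GCC builtins, not the kernel implementation:

    #include <stdio.h>
    #include <limits.h>

    typedef struct { long counter; } atomic64_t;

    static void atomic64_add_unchecked(long i, atomic64_t *v)
    {
        __atomic_fetch_add(&v->counter, i, __ATOMIC_SEQ_CST);
    }

    static void atomic64_add(long i, atomic64_t *v)
    {
        long old = __atomic_load_n(&v->counter, __ATOMIC_SEQ_CST);
        long new;

        do {
            if (__builtin_add_overflow(old, i, &new)) {
                fprintf(stderr, "refcount overflow blocked\n");
                return;  /* PaX would also report and kill the offender */
            }
        } while (!__atomic_compare_exchange_n(&v->counter, &old, new, 0,
                                              __ATOMIC_SEQ_CST,
                                              __ATOMIC_SEQ_CST));
    }

    int main(void)
    {
        atomic64_t v = { LONG_MAX };

        atomic64_add(1, &v);            /* blocked */
        atomic64_add_unchecked(1, &v);  /* wraps, by design */
        printf("counter = %ld\n", v.counter);
        return 0;
    }
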
4340diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4341index 2797163..c2a401d 100644
4342--- a/arch/frv/include/asm/cache.h
4343+++ b/arch/frv/include/asm/cache.h
4344@@ -12,10 +12,11 @@
4345 #ifndef __ASM_CACHE_H
4346 #define __ASM_CACHE_H
4347
4348+#include <linux/const.h>
4349
4350 /* bytes per L1 cache line */
4351 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4352-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4353+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4354
4355 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4356 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4357diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4358index 43901f2..0d8b865 100644
4359--- a/arch/frv/include/asm/kmap_types.h
4360+++ b/arch/frv/include/asm/kmap_types.h
4361@@ -2,6 +2,6 @@
4362 #ifndef _ASM_KMAP_TYPES_H
4363 #define _ASM_KMAP_TYPES_H
4364
4365-#define KM_TYPE_NR 17
4366+#define KM_TYPE_NR 18
4367
4368 #endif
4369diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4370index 385fd30..3aaf4fe 100644
4371--- a/arch/frv/mm/elf-fdpic.c
4372+++ b/arch/frv/mm/elf-fdpic.c
4373@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4374 {
4375 struct vm_area_struct *vma;
4376 unsigned long limit;
4377+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4378
4379 if (len > TASK_SIZE)
4380 return -ENOMEM;
4381@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4382 if (addr) {
4383 addr = PAGE_ALIGN(addr);
4384 vma = find_vma(current->mm, addr);
4385- if (TASK_SIZE - len >= addr &&
4386- (!vma || addr + len <= vma->vm_start))
4387+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4388 goto success;
4389 }
4390
4391@@ -89,7 +89,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4392 for (; vma; vma = vma->vm_next) {
4393 if (addr > limit)
4394 break;
4395- if (addr + len <= vma->vm_start)
4396+ if (check_heap_stack_gap(vma, addr, len, offset))
4397 goto success;
4398 addr = vma->vm_end;
4399 }
4400@@ -104,7 +104,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4401 for (; vma; vma = vma->vm_next) {
4402 if (addr > limit)
4403 break;
4404- if (addr + len <= vma->vm_start)
4405+ if (check_heap_stack_gap(vma, addr, len, offset))
4406 goto success;
4407 addr = vma->vm_end;
4408 }
4409diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4410index f4ca594..adc72fd6 100644
4411--- a/arch/hexagon/include/asm/cache.h
4412+++ b/arch/hexagon/include/asm/cache.h
4413@@ -21,9 +21,11 @@
4414 #ifndef __ASM_CACHE_H
4415 #define __ASM_CACHE_H
4416
4417+#include <linux/const.h>
4418+
4419 /* Bytes per L1 cache line */
4420-#define L1_CACHE_SHIFT (5)
4421-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4422+#define L1_CACHE_SHIFT 5
4423+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4424
4425 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4426 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4427diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4428index 6e6fe18..a6ae668 100644
4429--- a/arch/ia64/include/asm/atomic.h
4430+++ b/arch/ia64/include/asm/atomic.h
4431@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4432 #define atomic64_inc(v) atomic64_add(1, (v))
4433 #define atomic64_dec(v) atomic64_sub(1, (v))
4434
4435+#define atomic64_read_unchecked(v) atomic64_read(v)
4436+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4437+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4438+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4439+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4440+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4441+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4442+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4443+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4444+
4445 /* Atomic operations are already serializing */
4446 #define smp_mb__before_atomic_dec() barrier()
4447 #define smp_mb__after_atomic_dec() barrier()
4448diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4449index 988254a..e1ee885 100644
4450--- a/arch/ia64/include/asm/cache.h
4451+++ b/arch/ia64/include/asm/cache.h
4452@@ -1,6 +1,7 @@
4453 #ifndef _ASM_IA64_CACHE_H
4454 #define _ASM_IA64_CACHE_H
4455
4456+#include <linux/const.h>
4457
4458 /*
4459 * Copyright (C) 1998-2000 Hewlett-Packard Co
4460@@ -9,7 +10,7 @@
4461
4462 /* Bytes per L1 (data) cache line. */
4463 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4464-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4465+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4466
4467 #ifdef CONFIG_SMP
4468 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4469diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4470index b5298eb..67c6e62 100644
4471--- a/arch/ia64/include/asm/elf.h
4472+++ b/arch/ia64/include/asm/elf.h
4473@@ -42,6 +42,13 @@
4474 */
4475 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4476
4477+#ifdef CONFIG_PAX_ASLR
4478+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4479+
4480+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4481+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4482+#endif
4483+
4484 #define PT_IA_64_UNWIND 0x70000001
4485
4486 /* IA-64 relocations: */
4487diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4488index 96a8d92..617a1cf 100644
4489--- a/arch/ia64/include/asm/pgalloc.h
4490+++ b/arch/ia64/include/asm/pgalloc.h
4491@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4492 pgd_val(*pgd_entry) = __pa(pud);
4493 }
4494
4495+static inline void
4496+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4497+{
4498+ pgd_populate(mm, pgd_entry, pud);
4499+}
4500+
4501 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4502 {
4503 return quicklist_alloc(0, GFP_KERNEL, NULL);
4504@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4505 pud_val(*pud_entry) = __pa(pmd);
4506 }
4507
4508+static inline void
4509+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4510+{
4511+ pud_populate(mm, pud_entry, pmd);
4512+}
4513+
4514 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
4515 {
4516 return quicklist_alloc(0, GFP_KERNEL, NULL);
4517diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
4518index 815810c..d60bd4c 100644
4519--- a/arch/ia64/include/asm/pgtable.h
4520+++ b/arch/ia64/include/asm/pgtable.h
4521@@ -12,7 +12,7 @@
4522 * David Mosberger-Tang <davidm@hpl.hp.com>
4523 */
4524
4525-
4526+#include <linux/const.h>
4527 #include <asm/mman.h>
4528 #include <asm/page.h>
4529 #include <asm/processor.h>
4530@@ -142,6 +142,17 @@
4531 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4532 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4533 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
4534+
4535+#ifdef CONFIG_PAX_PAGEEXEC
4536+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
4537+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4538+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4539+#else
4540+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4541+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4542+# define PAGE_COPY_NOEXEC PAGE_COPY
4543+#endif
4544+
4545 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
4546 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
4547 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
4548diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
4549index 54ff557..70c88b7 100644
4550--- a/arch/ia64/include/asm/spinlock.h
4551+++ b/arch/ia64/include/asm/spinlock.h
4552@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
4553 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
4554
4555 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
4556- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
4557+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
4558 }
4559
4560 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
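
ACCESS_ONCE_RW() is the PaX companion to a const-qualified ACCESS_ONCE(): reads compile either way, but writes must go through the RW variant, so stray stores through the read-only form fail to compile. A hedged reconstruction of the pair:

    #include <stdio.h>

    #define ACCESS_ONCE(x)    (*(volatile const __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    int main(void)
    {
        int v = 1;

        ACCESS_ONCE_RW(v) = 2;           /* explicit write path */
        printf("%d\n", ACCESS_ONCE(v));  /* read-only access */
        /* ACCESS_ONCE(v) = 3; would be a compile error: const lvalue */
        return 0;
    }
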
4561diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
4562index 449c8c0..50cdf87 100644
4563--- a/arch/ia64/include/asm/uaccess.h
4564+++ b/arch/ia64/include/asm/uaccess.h
4565@@ -42,6 +42,8 @@
4566 #include <asm/pgtable.h>
4567 #include <asm/io.h>
4568
4569+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4570+
4571 /*
4572 * For historical reasons, the following macros are grossly misnamed:
4573 */
4574@@ -240,12 +242,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
4575 static inline unsigned long
4576 __copy_to_user (void __user *to, const void *from, unsigned long count)
4577 {
4578+ if (count > INT_MAX)
4579+ return count;
4580+
4581+ if (!__builtin_constant_p(count))
4582+ check_object_size(from, count, true);
4583+
4584 return __copy_user(to, (__force void __user *) from, count);
4585 }
4586
4587 static inline unsigned long
4588 __copy_from_user (void *to, const void __user *from, unsigned long count)
4589 {
4590+ if (count > INT_MAX)
4591+ return count;
4592+
4593+ if (!__builtin_constant_p(count))
4594+ check_object_size(to, count, false);
4595+
4596 return __copy_user((__force void __user *) to, from, count);
4597 }
4598
4599@@ -255,10 +269,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4600 ({ \
4601 void __user *__cu_to = (to); \
4602 const void *__cu_from = (from); \
4603- long __cu_len = (n); \
4604+ unsigned long __cu_len = (n); \
4605 \
4606- if (__access_ok(__cu_to, __cu_len, get_fs())) \
4607+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
4608+ if (!__builtin_constant_p(n)) \
4609+ check_object_size(__cu_from, __cu_len, true); \
4610 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
4611+ } \
4612 __cu_len; \
4613 })
4614
4615@@ -266,11 +283,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4616 ({ \
4617 void *__cu_to = (to); \
4618 const void __user *__cu_from = (from); \
4619- long __cu_len = (n); \
4620+ unsigned long __cu_len = (n); \
4621 \
4622 __chk_user_ptr(__cu_from); \
4623- if (__access_ok(__cu_from, __cu_len, get_fs())) \
4624+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
4625+ if (!__builtin_constant_p(n)) \
4626+ check_object_size(__cu_to, __cu_len, false); \
4627 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
4628+ } \
4629 __cu_len; \
4630 })
4631
4632diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
4633index 2d67317..07d8bfa 100644
4634--- a/arch/ia64/kernel/err_inject.c
4635+++ b/arch/ia64/kernel/err_inject.c
4636@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
4637 return NOTIFY_OK;
4638 }
4639
4640-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
4641+static struct notifier_block err_inject_cpu_notifier =
4642 {
4643 .notifier_call = err_inject_cpu_callback,
4644 };
4645diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
4646index 65bf9cd..794f06b 100644
4647--- a/arch/ia64/kernel/mca.c
4648+++ b/arch/ia64/kernel/mca.c
4649@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
4650 return NOTIFY_OK;
4651 }
4652
4653-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
4654+static struct notifier_block mca_cpu_notifier = {
4655 .notifier_call = mca_cpu_callback
4656 };
4657
4658diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
4659index 24603be..948052d 100644
4660--- a/arch/ia64/kernel/module.c
4661+++ b/arch/ia64/kernel/module.c
4662@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
4663 void
4664 module_free (struct module *mod, void *module_region)
4665 {
4666- if (mod && mod->arch.init_unw_table &&
4667- module_region == mod->module_init) {
4668+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
4669 unw_remove_unwind_table(mod->arch.init_unw_table);
4670 mod->arch.init_unw_table = NULL;
4671 }
4672@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
4673 }
4674
4675 static inline int
4676+in_init_rx (const struct module *mod, uint64_t addr)
4677+{
4678+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
4679+}
4680+
4681+static inline int
4682+in_init_rw (const struct module *mod, uint64_t addr)
4683+{
4684+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
4685+}
4686+
4687+static inline int
4688 in_init (const struct module *mod, uint64_t addr)
4689 {
4690- return addr - (uint64_t) mod->module_init < mod->init_size;
4691+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
4692+}
4693+
4694+static inline int
4695+in_core_rx (const struct module *mod, uint64_t addr)
4696+{
4697+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
4698+}
4699+
4700+static inline int
4701+in_core_rw (const struct module *mod, uint64_t addr)
4702+{
4703+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
4704 }
4705
4706 static inline int
4707 in_core (const struct module *mod, uint64_t addr)
4708 {
4709- return addr - (uint64_t) mod->module_core < mod->core_size;
4710+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
4711 }
4712
4713 static inline int
4714@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
4715 break;
4716
4717 case RV_BDREL:
4718- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
4719+ if (in_init_rx(mod, val))
4720+ val -= (uint64_t) mod->module_init_rx;
4721+ else if (in_init_rw(mod, val))
4722+ val -= (uint64_t) mod->module_init_rw;
4723+ else if (in_core_rx(mod, val))
4724+ val -= (uint64_t) mod->module_core_rx;
4725+ else if (in_core_rw(mod, val))
4726+ val -= (uint64_t) mod->module_core_rw;
4727 break;
4728
4729 case RV_LTV:
4730@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
4731 * addresses have been selected...
4732 */
4733 uint64_t gp;
4734- if (mod->core_size > MAX_LTOFF)
4735+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
4736 /*
4737 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
4738 * at the end of the module.
4739 */
4740- gp = mod->core_size - MAX_LTOFF / 2;
4741+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
4742 else
4743- gp = mod->core_size / 2;
4744- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
4745+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
4746+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
4747 mod->arch.gp = gp;
4748 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
4749 }
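
Every in_init_rx()/in_core_rw() style helper above relies on the same unsigned-range idiom: "addr - base < size" tests base <= addr < base + size in a single comparison, because an addr below base wraps around to a huge unsigned value. Demonstration:

    #include <stdint.h>
    #include <stdio.h>

    static int in_range(uint64_t addr, uint64_t base, uint64_t size)
    {
        return addr - base < size;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               in_range(0x1005, 0x1000, 0x10),   /* inside -> 1 */
               in_range(0x0fff, 0x1000, 0x10),   /* below  -> 0 */
               in_range(0x1010, 0x1000, 0x10));  /* at end -> 0 */
        return 0;
    }
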
4750diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
4751index 77597e5..6f28f3f 100644
4752--- a/arch/ia64/kernel/palinfo.c
4753+++ b/arch/ia64/kernel/palinfo.c
4754@@ -1045,7 +1045,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
4755 return NOTIFY_OK;
4756 }
4757
4758-static struct notifier_block __refdata palinfo_cpu_notifier =
4759+static struct notifier_block palinfo_cpu_notifier =
4760 {
4761 .notifier_call = palinfo_cpu_callback,
4762 .priority = 0,
4763diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
4764index 79802e5..1a89ec5 100644
4765--- a/arch/ia64/kernel/salinfo.c
4766+++ b/arch/ia64/kernel/salinfo.c
4767@@ -616,7 +616,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
4768 return NOTIFY_OK;
4769 }
4770
4771-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
4772+static struct notifier_block salinfo_cpu_notifier =
4773 {
4774 .notifier_call = salinfo_cpu_callback,
4775 .priority = 0,
4776diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
4777index d9439ef..d0cac6b 100644
4778--- a/arch/ia64/kernel/sys_ia64.c
4779+++ b/arch/ia64/kernel/sys_ia64.c
4780@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
4781 unsigned long start_addr, align_mask = PAGE_SIZE - 1;
4782 struct mm_struct *mm = current->mm;
4783 struct vm_area_struct *vma;
4784+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4785
4786 if (len > RGN_MAP_LIMIT)
4787 return -ENOMEM;
4788@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
4789 if (REGION_NUMBER(addr) == RGN_HPAGE)
4790 addr = 0;
4791 #endif
4792+
4793+#ifdef CONFIG_PAX_RANDMMAP
4794+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4795+ addr = mm->free_area_cache;
4796+ else
4797+#endif
4798+
4799 if (!addr)
4800 addr = mm->free_area_cache;
4801
4802@@ -61,14 +69,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
4803 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
4804 /* At this point: (!vma || addr < vma->vm_end). */
4805 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
4806- if (start_addr != TASK_UNMAPPED_BASE) {
4807+ if (start_addr != mm->mmap_base) {
4808 /* Start a new search --- just in case we missed some holes. */
4809- addr = TASK_UNMAPPED_BASE;
4810+ addr = mm->mmap_base;
4811 goto full_search;
4812 }
4813 return -ENOMEM;
4814 }
4815- if (!vma || addr + len <= vma->vm_start) {
4816+ if (check_heap_stack_gap(vma, addr, len, offset)) {
4817 /* Remember the address where we stopped this search: */
4818 mm->free_area_cache = addr + len;
4819 return addr;
4820diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
4821index dc00b2c..cce53c2 100644
4822--- a/arch/ia64/kernel/topology.c
4823+++ b/arch/ia64/kernel/topology.c
4824@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
4825 return NOTIFY_OK;
4826 }
4827
4828-static struct notifier_block __cpuinitdata cache_cpu_notifier =
4829+static struct notifier_block cache_cpu_notifier =
4830 {
4831 .notifier_call = cache_cpu_callback
4832 };
4833diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
4834index 0ccb28f..8992469 100644
4835--- a/arch/ia64/kernel/vmlinux.lds.S
4836+++ b/arch/ia64/kernel/vmlinux.lds.S
4837@@ -198,7 +198,7 @@ SECTIONS {
4838 /* Per-cpu data: */
4839 . = ALIGN(PERCPU_PAGE_SIZE);
4840 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
4841- __phys_per_cpu_start = __per_cpu_load;
4842+ __phys_per_cpu_start = per_cpu_load;
4843 /*
4844 * ensure percpu data fits
4845 * into percpu page size
4846diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
4847index 6cf0341..d352594 100644
4848--- a/arch/ia64/mm/fault.c
4849+++ b/arch/ia64/mm/fault.c
4850@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
4851 return pte_present(pte);
4852 }
4853
4854+#ifdef CONFIG_PAX_PAGEEXEC
4855+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4856+{
4857+ unsigned long i;
4858+
4859+ printk(KERN_ERR "PAX: bytes at PC: ");
4860+ for (i = 0; i < 8; i++) {
4861+ unsigned int c;
4862+ if (get_user(c, (unsigned int *)pc+i))
4863+ printk(KERN_CONT "???????? ");
4864+ else
4865+ printk(KERN_CONT "%08x ", c);
4866+ }
4867+ printk("\n");
4868+}
4869+#endif
4870+
4871 # define VM_READ_BIT 0
4872 # define VM_WRITE_BIT 1
4873 # define VM_EXEC_BIT 2
4874@@ -149,8 +166,21 @@ retry:
4875 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
4876 goto bad_area;
4877
4878- if ((vma->vm_flags & mask) != mask)
4879+ if ((vma->vm_flags & mask) != mask) {
4880+
4881+#ifdef CONFIG_PAX_PAGEEXEC
4882+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
4883+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
4884+ goto bad_area;
4885+
4886+ up_read(&mm->mmap_sem);
4887+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
4888+ do_group_exit(SIGKILL);
4889+ }
4890+#endif
4891+
4892 goto bad_area;
4893+ }
4894
4895 /*
4896 * If for any reason at all we couldn't handle the fault, make
4897diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
4898index 5ca674b..127c3cb 100644
4899--- a/arch/ia64/mm/hugetlbpage.c
4900+++ b/arch/ia64/mm/hugetlbpage.c
4901@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
4902 unsigned long pgoff, unsigned long flags)
4903 {
4904 struct vm_area_struct *vmm;
4905+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
4906
4907 if (len > RGN_MAP_LIMIT)
4908 return -ENOMEM;
4909@@ -171,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
4910 /* At this point: (!vmm || addr < vmm->vm_end). */
4911 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
4912 return -ENOMEM;
4913- if (!vmm || (addr + len) <= vmm->vm_start)
4914+ if (check_heap_stack_gap(vmm, addr, len, offset))
4915 return addr;
4916 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
4917 }
4918diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
4919index b755ea9..b9a969e 100644
4920--- a/arch/ia64/mm/init.c
4921+++ b/arch/ia64/mm/init.c
4922@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
4923 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
4924 vma->vm_end = vma->vm_start + PAGE_SIZE;
4925 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
4926+
4927+#ifdef CONFIG_PAX_PAGEEXEC
4928+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
4929+ vma->vm_flags &= ~VM_EXEC;
4930+
4931+#ifdef CONFIG_PAX_MPROTECT
4932+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
4933+ vma->vm_flags &= ~VM_MAYEXEC;
4934+#endif
4935+
4936+ }
4937+#endif
4938+
4939 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4940 down_write(&current->mm->mmap_sem);
4941 if (insert_vm_struct(current->mm, vma)) {
4942diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
4943index 40b3ee9..8c2c112 100644
4944--- a/arch/m32r/include/asm/cache.h
4945+++ b/arch/m32r/include/asm/cache.h
4946@@ -1,8 +1,10 @@
4947 #ifndef _ASM_M32R_CACHE_H
4948 #define _ASM_M32R_CACHE_H
4949
4950+#include <linux/const.h>
4951+
4952 /* L1 cache line size */
4953 #define L1_CACHE_SHIFT 4
4954-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4955+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4956
4957 #endif /* _ASM_M32R_CACHE_H */
4958diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
4959index 82abd15..d95ae5d 100644
4960--- a/arch/m32r/lib/usercopy.c
4961+++ b/arch/m32r/lib/usercopy.c
4962@@ -14,6 +14,9 @@
4963 unsigned long
4964 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
4965 {
4966+ if ((long)n < 0)
4967+ return n;
4968+
4969 prefetch(from);
4970 if (access_ok(VERIFY_WRITE, to, n))
4971 __copy_user(to,from,n);
4972@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
4973 unsigned long
4974 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
4975 {
4976+ if ((long)n < 0)
4977+ return n;
4978+
4979 prefetchw(to);
4980 if (access_ok(VERIFY_READ, from, n))
4981 __copy_user_zeroing(to,from,n);
4982diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
4983index 0395c51..5f26031 100644
4984--- a/arch/m68k/include/asm/cache.h
4985+++ b/arch/m68k/include/asm/cache.h
4986@@ -4,9 +4,11 @@
4987 #ifndef __ARCH_M68K_CACHE_H
4988 #define __ARCH_M68K_CACHE_H
4989
4990+#include <linux/const.h>
4991+
4992 /* bytes per L1 cache line */
4993 #define L1_CACHE_SHIFT 4
4994-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
4995+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4996
4997 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4998
4999diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5000index 4efe96a..60e8699 100644
5001--- a/arch/microblaze/include/asm/cache.h
5002+++ b/arch/microblaze/include/asm/cache.h
5003@@ -13,11 +13,12 @@
5004 #ifndef _ASM_MICROBLAZE_CACHE_H
5005 #define _ASM_MICROBLAZE_CACHE_H
5006
5007+#include <linux/const.h>
5008 #include <asm/registers.h>
5009
5010 #define L1_CACHE_SHIFT 5
5011 /* word-granular cache in microblaze */
5012-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5013+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5014
5015 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5016
5017diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5018index 01cc6ba..bcb7a5d 100644
5019--- a/arch/mips/include/asm/atomic.h
5020+++ b/arch/mips/include/asm/atomic.h
5021@@ -21,6 +21,10 @@
5022 #include <asm/cmpxchg.h>
5023 #include <asm/war.h>
5024
5025+#ifdef CONFIG_GENERIC_ATOMIC64
5026+#include <asm-generic/atomic64.h>
5027+#endif
5028+
5029 #define ATOMIC_INIT(i) { (i) }
5030
5031 /*
5032@@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5033 */
5034 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
5035
5036+#define atomic64_read_unchecked(v) atomic64_read(v)
5037+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5038+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5039+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5040+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5041+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5042+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5043+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5044+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5045+
5046 #endif /* CONFIG_64BIT */
5047
5048 /*
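
The *_unchecked defines mirror an API this patch introduces in common code: atomic operations gain overflow trapping (PaX REFCOUNT) elsewhere, and the unchecked variants are the escape hatch for counters that may wrap legitimately. MIPS gets no instrumented versions, so the unchecked names alias the plain ops here (the parisc and powerpc atomic.h hunks below do the same). A usage sketch, assuming the atomic64_unchecked_t typedef the patch adds in the generic headers:

static atomic64_unchecked_t tx_bytes = ATOMIC64_INIT(0);

static void account_tx(unsigned long len)
{
	/* statistics counter: wraparound is harmless, so the
	 * non-trapping unchecked op is the right choice */
	atomic64_add_unchecked(len, &tx_bytes);
}
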
5049diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
5050index b4db69f..8f3b093 100644
5051--- a/arch/mips/include/asm/cache.h
5052+++ b/arch/mips/include/asm/cache.h
5053@@ -9,10 +9,11 @@
5054 #ifndef _ASM_CACHE_H
5055 #define _ASM_CACHE_H
5056
5057+#include <linux/const.h>
5058 #include <kmalloc.h>
5059
5060 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
5061-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5062+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5063
5064 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5065 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5066diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
5067index 455c0ac..ad65fbe 100644
5068--- a/arch/mips/include/asm/elf.h
5069+++ b/arch/mips/include/asm/elf.h
5070@@ -372,13 +372,16 @@ extern const char *__elf_platform;
5071 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5072 #endif
5073
5074+#ifdef CONFIG_PAX_ASLR
5075+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5076+
5077+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5078+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5079+#endif
5080+
5081 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
5082 struct linux_binprm;
5083 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
5084 int uses_interp);
5085
5086-struct mm_struct;
5087-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5088-#define arch_randomize_brk arch_randomize_brk
5089-
5090 #endif /* _ASM_ELF_H */
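
PAX_ELF_ET_DYN_BASE fixes the load base PaX uses for ET_DYN executables (both arms of the ternary are 0x00400000UL, so 32- and 64-bit tasks share the same 4 MiB base), and the PAX_DELTA_*_LEN values are counts of page-granular randomization bits. Worked numbers, assuming 4 KiB pages:

/* PAGE_SHIFT == 12:
 *   32-bit task: 27 - 12 = 15 random page bits
 *                -> base jittered inside 2^27 B = 128 MiB
 *   64-bit task: 36 - 12 = 24 random page bits
 *                -> base jittered inside 2^36 B = 64 GiB
 */

The removal of arch_randomize_brk() pairs with the process.c hunk below: PaX's RANDMMAP randomization supersedes the weaker upstream brk randomization.
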
5091diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
5092index c1f6afa..38cc6e9 100644
5093--- a/arch/mips/include/asm/exec.h
5094+++ b/arch/mips/include/asm/exec.h
5095@@ -12,6 +12,6 @@
5096 #ifndef _ASM_EXEC_H
5097 #define _ASM_EXEC_H
5098
5099-extern unsigned long arch_align_stack(unsigned long sp);
5100+#define arch_align_stack(x) ((x) & ~0xfUL)
5101
5102 #endif /* _ASM_EXEC_H */
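
With randomization moved under PaX control, arch_align_stack() no longer subtracts a random slack, so the extern function collapses to a pure 16-byte alignment mask (the old helper is deleted from process.c below). Its effect on an illustrative value:

unsigned long sp = 0x7fff1234UL;
sp = arch_align_stack(sp);	/* (0x7fff1234 & ~0xfUL) == 0x7fff1230 */
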
5103diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
5104index dbaec94..6a14935 100644
5105--- a/arch/mips/include/asm/page.h
5106+++ b/arch/mips/include/asm/page.h
5107@@ -96,7 +96,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
5108 #ifdef CONFIG_CPU_MIPS32
5109 typedef struct { unsigned long pte_low, pte_high; } pte_t;
5110 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
5111- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
5112+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
5113 #else
5114 typedef struct { unsigned long long pte; } pte_t;
5115 #define pte_val(x) ((x).pte)
5116diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
5117index 881d18b..cea38bc 100644
5118--- a/arch/mips/include/asm/pgalloc.h
5119+++ b/arch/mips/include/asm/pgalloc.h
5120@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5121 {
5122 set_pud(pud, __pud((unsigned long)pmd));
5123 }
5124+
5125+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5126+{
5127+ pud_populate(mm, pud, pmd);
5128+}
5129 #endif
5130
5131 /*
5132diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
5133index b2050b9..d71bb1b 100644
5134--- a/arch/mips/include/asm/thread_info.h
5135+++ b/arch/mips/include/asm/thread_info.h
5136@@ -111,6 +111,8 @@ register struct thread_info *__current_thread_info __asm__("$28");
5137 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
5138 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
5139 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
5140+/* li takes a 32bit immediate */
5141+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
5142 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
5143
5144 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
5145@@ -126,15 +128,18 @@ register struct thread_info *__current_thread_info __asm__("$28");
5146 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
5147 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
5148 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
5149+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5150+
5151+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5152
5153 /* work to do in syscall_trace_leave() */
5154-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
5155+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5156
5157 /* work to do on interrupt/exception return */
5158 #define _TIF_WORK_MASK \
5159 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
5160 /* work to do on any return to u-space */
5161-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
5162+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
5163
5164 #endif /* __KERNEL__ */
5165
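
TIF_GRSEC_SETXID implements grsecurity's delayed credential propagation: when one thread changes uid/gid, its siblings are flagged and commit the new credentials at their next syscall boundary. Folding the flag into _TIF_SYSCALL_WORK makes the assembly stubs divert into the tracing path, where the ptrace.c hunk below tests and clears it; the flag is placed below bit 32 so the li-loaded mask can cover it. A rough sketch of the setter side (not the actual grsecurity code, locking elided):

/* marks every other thread in the group; each one applies the
 * pending credentials on its own next syscall */
static void sketch_mark_threads(struct task_struct *leader)
{
	struct task_struct *t = leader;

	while_each_thread(leader, t)
		set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
}
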
5166diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
5167index 9fdd8bc..4bd7f1a 100644
5168--- a/arch/mips/kernel/binfmt_elfn32.c
5169+++ b/arch/mips/kernel/binfmt_elfn32.c
5170@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5171 #undef ELF_ET_DYN_BASE
5172 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5173
5174+#ifdef CONFIG_PAX_ASLR
5175+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5176+
5177+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5178+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5179+#endif
5180+
5181 #include <asm/processor.h>
5182 #include <linux/module.h>
5183 #include <linux/elfcore.h>
5184diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
5185index ff44823..97f8906 100644
5186--- a/arch/mips/kernel/binfmt_elfo32.c
5187+++ b/arch/mips/kernel/binfmt_elfo32.c
5188@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5189 #undef ELF_ET_DYN_BASE
5190 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5191
5192+#ifdef CONFIG_PAX_ASLR
5193+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5194+
5195+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5196+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5197+#endif
5198+
5199 #include <asm/processor.h>
5200
5201 /*
5202diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
5203index a11c6f9..be5e164 100644
5204--- a/arch/mips/kernel/process.c
5205+++ b/arch/mips/kernel/process.c
5206@@ -460,15 +460,3 @@ unsigned long get_wchan(struct task_struct *task)
5207 out:
5208 return pc;
5209 }
5210-
5211-/*
5212- * Don't forget that the stack pointer must be aligned on a 8 bytes
5213- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
5214- */
5215-unsigned long arch_align_stack(unsigned long sp)
5216-{
5217- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5218- sp -= get_random_int() & ~PAGE_MASK;
5219-
5220- return sp & ALMASK;
5221-}
5222diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
5223index 4812c6d..2069554 100644
5224--- a/arch/mips/kernel/ptrace.c
5225+++ b/arch/mips/kernel/ptrace.c
5226@@ -528,6 +528,10 @@ static inline int audit_arch(void)
5227 return arch;
5228 }
5229
5230+#ifdef CONFIG_GRKERNSEC_SETXID
5231+extern void gr_delayed_cred_worker(void);
5232+#endif
5233+
5234 /*
5235 * Notification of system call entry/exit
5236 * - triggered by current->work.syscall_trace
5237@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
5238 /* do the secure computing check first */
5239 secure_computing_strict(regs->regs[2]);
5240
5241+#ifdef CONFIG_GRKERNSEC_SETXID
5242+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5243+ gr_delayed_cred_worker();
5244+#endif
5245+
5246 if (!(current->ptrace & PT_PTRACED))
5247 goto out;
5248
5249diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
5250index d20a4bc..7096ae5 100644
5251--- a/arch/mips/kernel/scall32-o32.S
5252+++ b/arch/mips/kernel/scall32-o32.S
5253@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5254
5255 stack_done:
5256 lw t0, TI_FLAGS($28) # syscall tracing enabled?
5257- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5258+ li t1, _TIF_SYSCALL_WORK
5259 and t0, t1
5260 bnez t0, syscall_trace_entry # -> yes
5261
5262diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
5263index b64f642..0fe6eab 100644
5264--- a/arch/mips/kernel/scall64-64.S
5265+++ b/arch/mips/kernel/scall64-64.S
5266@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
5267
5268 sd a3, PT_R26(sp) # save a3 for syscall restarting
5269
5270- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5271+ li t1, _TIF_SYSCALL_WORK
5272 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5273 and t0, t1, t0
5274 bnez t0, syscall_trace_entry
5275diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
5276index c29ac19..c592d05 100644
5277--- a/arch/mips/kernel/scall64-n32.S
5278+++ b/arch/mips/kernel/scall64-n32.S
5279@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
5280
5281 sd a3, PT_R26(sp) # save a3 for syscall restarting
5282
5283- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5284+ li t1, _TIF_SYSCALL_WORK
5285 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5286 and t0, t1, t0
5287 bnez t0, n32_syscall_trace_entry
5288diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
5289index cf3e75e..72e93fe 100644
5290--- a/arch/mips/kernel/scall64-o32.S
5291+++ b/arch/mips/kernel/scall64-o32.S
5292@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5293 PTR 4b, bad_stack
5294 .previous
5295
5296- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5297+ li t1, _TIF_SYSCALL_WORK
5298 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5299 and t0, t1, t0
5300 bnez t0, trace_a_syscall
5301diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
5302index ddcec1e..c7f983e 100644
5303--- a/arch/mips/mm/fault.c
5304+++ b/arch/mips/mm/fault.c
5305@@ -27,6 +27,23 @@
5306 #include <asm/highmem.h> /* For VMALLOC_END */
5307 #include <linux/kdebug.h>
5308
5309+#ifdef CONFIG_PAX_PAGEEXEC
5310+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5311+{
5312+ unsigned long i;
5313+
5314+ printk(KERN_ERR "PAX: bytes at PC: ");
5315+ for (i = 0; i < 5; i++) {
5316+ unsigned int c;
5317+ if (get_user(c, (unsigned int *)pc+i))
5318+ printk(KERN_CONT "???????? ");
5319+ else
5320+ printk(KERN_CONT "%08x ", c);
5321+ }
5322+ printk("\n");
5323+}
5324+#endif
5325+
5326 /*
5327 * This routine handles page faults. It determines the address,
5328 * and the problem, and then passes it off to one of the appropriate
5329diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
5330index 7e5fe27..479a219 100644
5331--- a/arch/mips/mm/mmap.c
5332+++ b/arch/mips/mm/mmap.c
5333@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5334 struct vm_area_struct *vma;
5335 unsigned long addr = addr0;
5336 int do_color_align;
5337+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5338 struct vm_unmapped_area_info info;
5339
5340 if (unlikely(len > TASK_SIZE))
5341@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5342 do_color_align = 1;
5343
5344 /* requesting a specific address */
5345+
5346+#ifdef CONFIG_PAX_RANDMMAP
5347+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
5348+#endif
5349+
5350 if (addr) {
5351 if (do_color_align)
5352 addr = COLOUR_ALIGN(addr, pgoff);
5353@@ -91,8 +97,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5354 addr = PAGE_ALIGN(addr);
5355
5356 vma = find_vma(mm, addr);
5357- if (TASK_SIZE - len >= addr &&
5358- (!vma || addr + len <= vma->vm_start))
5359+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5360 return addr;
5361 }
5362
5363@@ -146,6 +151,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5364 {
5365 unsigned long random_factor = 0UL;
5366
5367+#ifdef CONFIG_PAX_RANDMMAP
5368+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5369+#endif
5370+
5371 if (current->flags & PF_RANDOMIZE) {
5372 random_factor = get_random_int();
5373 random_factor = random_factor << PAGE_SHIFT;
5374@@ -157,42 +166,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5375
5376 if (mmap_is_legacy()) {
5377 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5378+
5379+#ifdef CONFIG_PAX_RANDMMAP
5380+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5381+ mm->mmap_base += mm->delta_mmap;
5382+#endif
5383+
5384 mm->get_unmapped_area = arch_get_unmapped_area;
5385 mm->unmap_area = arch_unmap_area;
5386 } else {
5387 mm->mmap_base = mmap_base(random_factor);
5388+
5389+#ifdef CONFIG_PAX_RANDMMAP
5390+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5391+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5392+#endif
5393+
5394 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5395 mm->unmap_area = arch_unmap_area_topdown;
5396 }
5397 }
5398
5399-static inline unsigned long brk_rnd(void)
5400-{
5401- unsigned long rnd = get_random_int();
5402-
5403- rnd = rnd << PAGE_SHIFT;
5404- /* 8MB for 32bit, 256MB for 64bit */
5405- if (TASK_IS_32BIT_ADDR)
5406- rnd = rnd & 0x7ffffful;
5407- else
5408- rnd = rnd & 0xffffffful;
5409-
5410- return rnd;
5411-}
5412-
5413-unsigned long arch_randomize_brk(struct mm_struct *mm)
5414-{
5415- unsigned long base = mm->brk;
5416- unsigned long ret;
5417-
5418- ret = PAGE_ALIGN(base + brk_rnd());
5419-
5420- if (ret < mm->brk)
5421- return mm->brk;
5422-
5423- return ret;
5424-}
5425-
5426 int __virt_addr_valid(const volatile void *kaddr)
5427 {
5428 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
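
check_heap_stack_gap() and gr_rand_threadstack_offset() come from the common-code part of this patch: the old test "!vma || addr + len <= vma->vm_start" is extended so a new mapping must also clear a configurable guard gap under stack-like vmas, plus a per-thread random offset for thread stacks. A hedged sketch of the contract (simplified; the real helper also consults the heap-stack-gap sysctl and VM_GROWSDOWN):

static inline bool check_heap_stack_gap_sketch(const struct vm_area_struct *vma,
		unsigned long addr, unsigned long len, unsigned long offset)
{
	if (!vma)
		return true;		/* nothing above: the range is free */
	if (addr + len > vma->vm_start)
		return false;		/* plain overlap with the next vma */
	/* keep 'offset' bytes clear below the next vma */
	return vma->vm_start - addr - len >= offset;
}
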
5429diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5430index 967d144..db12197 100644
5431--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
5432+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5433@@ -11,12 +11,14 @@
5434 #ifndef _ASM_PROC_CACHE_H
5435 #define _ASM_PROC_CACHE_H
5436
5437+#include <linux/const.h>
5438+
5439 /* L1 cache */
5440
5441 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5442 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
5443-#define L1_CACHE_BYTES 16 /* bytes per entry */
5444 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
5445+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5446 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
5447
5448 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5449diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5450index bcb5df2..84fabd2 100644
5451--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5452+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5453@@ -16,13 +16,15 @@
5454 #ifndef _ASM_PROC_CACHE_H
5455 #define _ASM_PROC_CACHE_H
5456
5457+#include <linux/const.h>
5458+
5459 /*
5460 * L1 cache
5461 */
5462 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5463 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
5464-#define L1_CACHE_BYTES 32 /* bytes per entry */
5465 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
5466+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5467 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
5468
5469 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5470diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
5471index 4ce7a01..449202a 100644
5472--- a/arch/openrisc/include/asm/cache.h
5473+++ b/arch/openrisc/include/asm/cache.h
5474@@ -19,11 +19,13 @@
5475 #ifndef __ASM_OPENRISC_CACHE_H
5476 #define __ASM_OPENRISC_CACHE_H
5477
5478+#include <linux/const.h>
5479+
5480 /* FIXME: How can we replace these with values from the CPU...
5481 * they shouldn't be hard-coded!
5482 */
5483
5484-#define L1_CACHE_BYTES 16
5485 #define L1_CACHE_SHIFT 4
5486+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5487
5488 #endif /* __ASM_OPENRISC_CACHE_H */
5489diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
5490index af9cf30..2aae9b2 100644
5491--- a/arch/parisc/include/asm/atomic.h
5492+++ b/arch/parisc/include/asm/atomic.h
5493@@ -229,6 +229,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5494
5495 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
5496
5497+#define atomic64_read_unchecked(v) atomic64_read(v)
5498+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5499+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5500+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5501+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5502+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5503+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5504+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5505+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5506+
5507 #endif /* !CONFIG_64BIT */
5508
5509
5510diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
5511index 47f11c7..3420df2 100644
5512--- a/arch/parisc/include/asm/cache.h
5513+++ b/arch/parisc/include/asm/cache.h
5514@@ -5,6 +5,7 @@
5515 #ifndef __ARCH_PARISC_CACHE_H
5516 #define __ARCH_PARISC_CACHE_H
5517
5518+#include <linux/const.h>
5519
5520 /*
5521 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
5522@@ -15,13 +16,13 @@
5523 * just ruin performance.
5524 */
5525 #ifdef CONFIG_PA20
5526-#define L1_CACHE_BYTES 64
5527 #define L1_CACHE_SHIFT 6
5528 #else
5529-#define L1_CACHE_BYTES 32
5530 #define L1_CACHE_SHIFT 5
5531 #endif
5532
5533+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5534+
5535 #ifndef __ASSEMBLY__
5536
5537 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5538diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
5539index 19f6cb1..6c78cf2 100644
5540--- a/arch/parisc/include/asm/elf.h
5541+++ b/arch/parisc/include/asm/elf.h
5542@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
5543
5544 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
5545
5546+#ifdef CONFIG_PAX_ASLR
5547+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5548+
5549+#define PAX_DELTA_MMAP_LEN 16
5550+#define PAX_DELTA_STACK_LEN 16
5551+#endif
5552+
5553 /* This yields a mask that user programs can use to figure out what
5554 instruction set this CPU supports. This could be done in user space,
5555 but it's not easy, and we've already done it here. */
5556diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
5557index fc987a1..6e068ef 100644
5558--- a/arch/parisc/include/asm/pgalloc.h
5559+++ b/arch/parisc/include/asm/pgalloc.h
5560@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
5561 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
5562 }
5563
5564+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
5565+{
5566+ pgd_populate(mm, pgd, pmd);
5567+}
5568+
5569 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
5570 {
5571 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
5572@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
5573 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
5574 #define pmd_free(mm, x) do { } while (0)
5575 #define pgd_populate(mm, pmd, pte) BUG()
5576+#define pgd_populate_kernel(mm, pmd, pte) BUG()
5577
5578 #endif
5579
5580diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
5581index 7df49fa..38b62bf 100644
5582--- a/arch/parisc/include/asm/pgtable.h
5583+++ b/arch/parisc/include/asm/pgtable.h
5584@@ -218,6 +218,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
5585 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
5586 #define PAGE_COPY PAGE_EXECREAD
5587 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
5588+
5589+#ifdef CONFIG_PAX_PAGEEXEC
5590+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
5591+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
5592+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
5593+#else
5594+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5595+# define PAGE_COPY_NOEXEC PAGE_COPY
5596+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5597+#endif
5598+
5599 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
5600 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
5601 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
5602diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
5603index 4ba2c93..f5e3974 100644
5604--- a/arch/parisc/include/asm/uaccess.h
5605+++ b/arch/parisc/include/asm/uaccess.h
5606@@ -251,10 +251,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
5607 const void __user *from,
5608 unsigned long n)
5609 {
5610- int sz = __compiletime_object_size(to);
5611+ size_t sz = __compiletime_object_size(to);
5612 int ret = -EFAULT;
5613
5614- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
5615+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
5616 ret = __copy_from_user(to, from, n);
5617 else
5618 copy_from_user_overflow();
5619diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
5620index 2a625fb..9908930 100644
5621--- a/arch/parisc/kernel/module.c
5622+++ b/arch/parisc/kernel/module.c
5623@@ -98,16 +98,38 @@
5624
5625 /* three functions to determine where in the module core
5626 * or init pieces the location is */
5627+static inline int in_init_rx(struct module *me, void *loc)
5628+{
5629+ return (loc >= me->module_init_rx &&
5630+ loc < (me->module_init_rx + me->init_size_rx));
5631+}
5632+
5633+static inline int in_init_rw(struct module *me, void *loc)
5634+{
5635+ return (loc >= me->module_init_rw &&
5636+ loc < (me->module_init_rw + me->init_size_rw));
5637+}
5638+
5639 static inline int in_init(struct module *me, void *loc)
5640 {
5641- return (loc >= me->module_init &&
5642- loc <= (me->module_init + me->init_size));
5643+ return in_init_rx(me, loc) || in_init_rw(me, loc);
5644+}
5645+
5646+static inline int in_core_rx(struct module *me, void *loc)
5647+{
5648+ return (loc >= me->module_core_rx &&
5649+ loc < (me->module_core_rx + me->core_size_rx));
5650+}
5651+
5652+static inline int in_core_rw(struct module *me, void *loc)
5653+{
5654+ return (loc >= me->module_core_rw &&
5655+ loc < (me->module_core_rw + me->core_size_rw));
5656 }
5657
5658 static inline int in_core(struct module *me, void *loc)
5659 {
5660- return (loc >= me->module_core &&
5661- loc <= (me->module_core + me->core_size));
5662+ return in_core_rx(me, loc) || in_core_rw(me, loc);
5663 }
5664
5665 static inline int in_local(struct module *me, void *loc)
5666@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
5667 }
5668
5669 /* align things a bit */
5670- me->core_size = ALIGN(me->core_size, 16);
5671- me->arch.got_offset = me->core_size;
5672- me->core_size += gots * sizeof(struct got_entry);
5673+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
5674+ me->arch.got_offset = me->core_size_rw;
5675+ me->core_size_rw += gots * sizeof(struct got_entry);
5676
5677- me->core_size = ALIGN(me->core_size, 16);
5678- me->arch.fdesc_offset = me->core_size;
5679- me->core_size += fdescs * sizeof(Elf_Fdesc);
5680+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
5681+ me->arch.fdesc_offset = me->core_size_rw;
5682+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
5683
5684 me->arch.got_max = gots;
5685 me->arch.fdesc_max = fdescs;
5686@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
5687
5688 BUG_ON(value == 0);
5689
5690- got = me->module_core + me->arch.got_offset;
5691+ got = me->module_core_rw + me->arch.got_offset;
5692 for (i = 0; got[i].addr; i++)
5693 if (got[i].addr == value)
5694 goto out;
5695@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
5696 #ifdef CONFIG_64BIT
5697 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
5698 {
5699- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
5700+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
5701
5702 if (!value) {
5703 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
5704@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
5705
5706 /* Create new one */
5707 fdesc->addr = value;
5708- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
5709+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
5710 return (Elf_Addr)fdesc;
5711 }
5712 #endif /* CONFIG_64BIT */
5713@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
5714
5715 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
5716 end = table + sechdrs[me->arch.unwind_section].sh_size;
5717- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
5718+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
5719
5720 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
5721 me->arch.unwind_section, table, end, gp);
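
The in_init_rx/in_init_rw and in_core_rx/in_core_rw split relies on fields this patch adds to struct module elsewhere: each module's core and init regions are divided into an RX half (text, mapped read-execute) and an RW half (data, mapped read-write, never executable). parisc keeps its GOT and function descriptors in the RW half, since they are written during relocation but only read, never executed, afterwards. The layout assumed by the hunks above, sketched with the patch's field names:

/* replaces module_core/core_size and module_init/init_size */
void *module_core_rx;	unsigned int core_size_rx;	/* r-x: code */
void *module_core_rw;	unsigned int core_size_rw;	/* rw-: data, GOT, opds */
void *module_init_rx;	unsigned int init_size_rx;
void *module_init_rw;	unsigned int init_size_rw;
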
5722diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
5723index f76c108..92bad82 100644
5724--- a/arch/parisc/kernel/sys_parisc.c
5725+++ b/arch/parisc/kernel/sys_parisc.c
5726@@ -33,9 +33,11 @@
5727 #include <linux/utsname.h>
5728 #include <linux/personality.h>
5729
5730-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
5731+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
5732+ unsigned long flags)
5733 {
5734 struct vm_area_struct *vma;
5735+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5736
5737 addr = PAGE_ALIGN(addr);
5738
5739@@ -43,7 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
5740 /* At this point: (!vma || addr < vma->vm_end). */
5741 if (TASK_SIZE - len < addr)
5742 return -ENOMEM;
5743- if (!vma || addr + len <= vma->vm_start)
5744+ if (check_heap_stack_gap(vma, addr, len, offset))
5745 return addr;
5746 addr = vma->vm_end;
5747 }
5748@@ -67,11 +69,12 @@ static int get_offset(struct address_space *mapping)
5749 return offset & 0x3FF000;
5750 }
5751
5752-static unsigned long get_shared_area(struct address_space *mapping,
5753- unsigned long addr, unsigned long len, unsigned long pgoff)
5754+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
5755+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
5756 {
5757 struct vm_area_struct *vma;
5758 int offset = mapping ? get_offset(mapping) : 0;
5759+ unsigned long rand_offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5760
5761 offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
5762
5763@@ -81,7 +84,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
5764 /* At this point: (!vma || addr < vma->vm_end). */
5765 if (TASK_SIZE - len < addr)
5766 return -ENOMEM;
5767- if (!vma || addr + len <= vma->vm_start)
5768+ if (check_heap_stack_gap(vma, addr, len, rand_offset))
5769 return addr;
5770 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
5771 if (addr < vma->vm_end) /* handle wraparound */
5772@@ -100,14 +103,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5773 if (flags & MAP_FIXED)
5774 return addr;
5775 if (!addr)
5776- addr = TASK_UNMAPPED_BASE;
5777+ addr = current->mm->mmap_base;
5778
5779 if (filp) {
5780- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
5781+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
5782 } else if(flags & MAP_SHARED) {
5783- addr = get_shared_area(NULL, addr, len, pgoff);
5784+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
5785 } else {
5786- addr = get_unshared_area(addr, len);
5787+ addr = get_unshared_area(filp, addr, len, flags);
5788 }
5789 return addr;
5790 }
5791diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
5792index 45ba99f..8e22c33 100644
5793--- a/arch/parisc/kernel/traps.c
5794+++ b/arch/parisc/kernel/traps.c
5795@@ -732,9 +732,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
5796
5797 down_read(&current->mm->mmap_sem);
5798 vma = find_vma(current->mm,regs->iaoq[0]);
5799- if (vma && (regs->iaoq[0] >= vma->vm_start)
5800- && (vma->vm_flags & VM_EXEC)) {
5801-
5802+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
5803 fault_address = regs->iaoq[0];
5804 fault_space = regs->iasq[0];
5805
5806diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
5807index 18162ce..94de376 100644
5808--- a/arch/parisc/mm/fault.c
5809+++ b/arch/parisc/mm/fault.c
5810@@ -15,6 +15,7 @@
5811 #include <linux/sched.h>
5812 #include <linux/interrupt.h>
5813 #include <linux/module.h>
5814+#include <linux/unistd.h>
5815
5816 #include <asm/uaccess.h>
5817 #include <asm/traps.h>
5818@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
5819 static unsigned long
5820 parisc_acctyp(unsigned long code, unsigned int inst)
5821 {
5822- if (code == 6 || code == 16)
5823+ if (code == 6 || code == 7 || code == 16)
5824 return VM_EXEC;
5825
5826 switch (inst & 0xf0000000) {
5827@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
5828 }
5829 #endif
5830
5831+#ifdef CONFIG_PAX_PAGEEXEC
5832+/*
5833+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
5834+ *
5835+ * returns 1 when task should be killed
5836+ * 2 when rt_sigreturn trampoline was detected
5837+ * 3 when unpatched PLT trampoline was detected
5838+ */
5839+static int pax_handle_fetch_fault(struct pt_regs *regs)
5840+{
5841+
5842+#ifdef CONFIG_PAX_EMUPLT
5843+ int err;
5844+
5845+ do { /* PaX: unpatched PLT emulation */
5846+ unsigned int bl, depwi;
5847+
5848+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
5849+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
5850+
5851+ if (err)
5852+ break;
5853+
5854+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
5855+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
5856+
5857+ err = get_user(ldw, (unsigned int *)addr);
5858+ err |= get_user(bv, (unsigned int *)(addr+4));
5859+ err |= get_user(ldw2, (unsigned int *)(addr+8));
5860+
5861+ if (err)
5862+ break;
5863+
5864+ if (ldw == 0x0E801096U &&
5865+ bv == 0xEAC0C000U &&
5866+ ldw2 == 0x0E881095U)
5867+ {
5868+ unsigned int resolver, map;
5869+
5870+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
5871+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
5872+ if (err)
5873+ break;
5874+
5875+ regs->gr[20] = instruction_pointer(regs)+8;
5876+ regs->gr[21] = map;
5877+ regs->gr[22] = resolver;
5878+ regs->iaoq[0] = resolver | 3UL;
5879+ regs->iaoq[1] = regs->iaoq[0] + 4;
5880+ return 3;
5881+ }
5882+ }
5883+ } while (0);
5884+#endif
5885+
5886+#ifdef CONFIG_PAX_EMUTRAMP
5887+
5888+#ifndef CONFIG_PAX_EMUSIGRT
5889+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
5890+ return 1;
5891+#endif
5892+
5893+ do { /* PaX: rt_sigreturn emulation */
5894+ unsigned int ldi1, ldi2, bel, nop;
5895+
5896+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
5897+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
5898+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
5899+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
5900+
5901+ if (err)
5902+ break;
5903+
5904+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
5905+ ldi2 == 0x3414015AU &&
5906+ bel == 0xE4008200U &&
5907+ nop == 0x08000240U)
5908+ {
5909+ regs->gr[25] = (ldi1 & 2) >> 1;
5910+ regs->gr[20] = __NR_rt_sigreturn;
5911+ regs->gr[31] = regs->iaoq[1] + 16;
5912+ regs->sr[0] = regs->iasq[1];
5913+ regs->iaoq[0] = 0x100UL;
5914+ regs->iaoq[1] = regs->iaoq[0] + 4;
5915+ regs->iasq[0] = regs->sr[2];
5916+ regs->iasq[1] = regs->sr[2];
5917+ return 2;
5918+ }
5919+ } while (0);
5920+#endif
5921+
5922+ return 1;
5923+}
5924+
5925+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5926+{
5927+ unsigned long i;
5928+
5929+ printk(KERN_ERR "PAX: bytes at PC: ");
5930+ for (i = 0; i < 5; i++) {
5931+ unsigned int c;
5932+ if (get_user(c, (unsigned int *)pc+i))
5933+ printk(KERN_CONT "???????? ");
5934+ else
5935+ printk(KERN_CONT "%08x ", c);
5936+ }
5937+ printk("\n");
5938+}
5939+#endif
5940+
5941 int fixup_exception(struct pt_regs *regs)
5942 {
5943 const struct exception_table_entry *fix;
5944@@ -192,8 +303,33 @@ good_area:
5945
5946 acc_type = parisc_acctyp(code,regs->iir);
5947
5948- if ((vma->vm_flags & acc_type) != acc_type)
5949+ if ((vma->vm_flags & acc_type) != acc_type) {
5950+
5951+#ifdef CONFIG_PAX_PAGEEXEC
5952+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
5953+ (address & ~3UL) == instruction_pointer(regs))
5954+ {
5955+ up_read(&mm->mmap_sem);
5956+ switch (pax_handle_fetch_fault(regs)) {
5957+
5958+#ifdef CONFIG_PAX_EMUPLT
5959+ case 3:
5960+ return;
5961+#endif
5962+
5963+#ifdef CONFIG_PAX_EMUTRAMP
5964+ case 2:
5965+ return;
5966+#endif
5967+
5968+ }
5969+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
5970+ do_group_exit(SIGKILL);
5971+ }
5972+#endif
5973+
5974 goto bad_area;
5975+ }
5976
5977 /*
5978 * If for any reason at all we couldn't handle the fault, make
5979diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
5980index e3b1d41..8e81edf 100644
5981--- a/arch/powerpc/include/asm/atomic.h
5982+++ b/arch/powerpc/include/asm/atomic.h
5983@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
5984 return t1;
5985 }
5986
5987+#define atomic64_read_unchecked(v) atomic64_read(v)
5988+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5989+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5990+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5991+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5992+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5993+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5994+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5995+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5996+
5997 #endif /* __powerpc64__ */
5998
5999 #endif /* __KERNEL__ */
6000diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
6001index 9e495c9..b6878e5 100644
6002--- a/arch/powerpc/include/asm/cache.h
6003+++ b/arch/powerpc/include/asm/cache.h
6004@@ -3,6 +3,7 @@
6005
6006 #ifdef __KERNEL__
6007
6008+#include <linux/const.h>
6009
6010 /* bytes per L1 cache line */
6011 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
6012@@ -22,7 +23,7 @@
6013 #define L1_CACHE_SHIFT 7
6014 #endif
6015
6016-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6017+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6018
6019 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6020
6021diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
6022index 6abf0a1..459d0f1 100644
6023--- a/arch/powerpc/include/asm/elf.h
6024+++ b/arch/powerpc/include/asm/elf.h
6025@@ -28,8 +28,19 @@
6026 the loader. We need to make sure that it is out of the way of the program
6027 that it will "exec", and that there is sufficient room for the brk. */
6028
6029-extern unsigned long randomize_et_dyn(unsigned long base);
6030-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
6031+#define ELF_ET_DYN_BASE (0x20000000)
6032+
6033+#ifdef CONFIG_PAX_ASLR
6034+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
6035+
6036+#ifdef __powerpc64__
6037+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
6038+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
6039+#else
6040+#define PAX_DELTA_MMAP_LEN 15
6041+#define PAX_DELTA_STACK_LEN 15
6042+#endif
6043+#endif
6044
6045 /*
6046 * Our registers are always unsigned longs, whether we're a 32 bit
6047@@ -124,10 +135,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6048 (0x7ff >> (PAGE_SHIFT - 12)) : \
6049 (0x3ffff >> (PAGE_SHIFT - 12)))
6050
6051-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6052-#define arch_randomize_brk arch_randomize_brk
6053-
6054-
6055 #ifdef CONFIG_SPU_BASE
6056 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
6057 #define NT_SPU 1
6058diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
6059index 8196e9c..d83a9f3 100644
6060--- a/arch/powerpc/include/asm/exec.h
6061+++ b/arch/powerpc/include/asm/exec.h
6062@@ -4,6 +4,6 @@
6063 #ifndef _ASM_POWERPC_EXEC_H
6064 #define _ASM_POWERPC_EXEC_H
6065
6066-extern unsigned long arch_align_stack(unsigned long sp);
6067+#define arch_align_stack(x) ((x) & ~0xfUL)
6068
6069 #endif /* _ASM_POWERPC_EXEC_H */
6070diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
6071index 5acabbd..7ea14fa 100644
6072--- a/arch/powerpc/include/asm/kmap_types.h
6073+++ b/arch/powerpc/include/asm/kmap_types.h
6074@@ -10,7 +10,7 @@
6075 * 2 of the License, or (at your option) any later version.
6076 */
6077
6078-#define KM_TYPE_NR 16
6079+#define KM_TYPE_NR 17
6080
6081 #endif /* __KERNEL__ */
6082 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
6083diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
6084index 8565c25..2865190 100644
6085--- a/arch/powerpc/include/asm/mman.h
6086+++ b/arch/powerpc/include/asm/mman.h
6087@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
6088 }
6089 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
6090
6091-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
6092+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
6093 {
6094 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
6095 }
6096diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
6097index f072e97..b436dee 100644
6098--- a/arch/powerpc/include/asm/page.h
6099+++ b/arch/powerpc/include/asm/page.h
6100@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
6101 * and needs to be executable. This means the whole heap ends
6102 * up being executable.
6103 */
6104-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6105- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6106+#define VM_DATA_DEFAULT_FLAGS32 \
6107+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6108+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6109
6110 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6111 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6112@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
6113 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
6114 #endif
6115
6116+#define ktla_ktva(addr) (addr)
6117+#define ktva_ktla(addr) (addr)
6118+
6119 /*
6120 * Use the top bit of the higher-level page table entries to indicate whether
6121 * the entries we point to contain hugepages. This works because we know that
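
The VM_DATA_DEFAULT_FLAGS32 change makes the executable heap opt-in: VM_EXEC is granted only when the ELF loader flagged the task READ_IMPLIES_EXEC (old binaries without PT_GNU_STACK), instead of unconditionally. The ktla_ktva()/ktva_ktla() identity macros satisfy generic PaX code that, on x86 KERNEXEC, translates between kernel text and its linear alias; powerpc needs no such translation, so they are no-ops. A check sketch for the flag change:

static bool heap_gets_vm_exec(void)
{
	/* the macro now reads current->personality at its point of use:
	 * only READ_IMPLIES_EXEC tasks keep the rwx data default */
	return !!(VM_DATA_DEFAULT_FLAGS32 & VM_EXEC);
}
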
6122diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
6123index cd915d6..c10cee8 100644
6124--- a/arch/powerpc/include/asm/page_64.h
6125+++ b/arch/powerpc/include/asm/page_64.h
6126@@ -154,15 +154,18 @@ do { \
6127 * stack by default, so in the absence of a PT_GNU_STACK program header
6128 * we turn execute permission off.
6129 */
6130-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6131- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6132+#define VM_STACK_DEFAULT_FLAGS32 \
6133+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6134+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6135
6136 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6137 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6138
6139+#ifndef CONFIG_PAX_PAGEEXEC
6140 #define VM_STACK_DEFAULT_FLAGS \
6141 (is_32bit_task() ? \
6142 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
6143+#endif
6144
6145 #include <asm-generic/getorder.h>
6146
6147diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
6148index 292725c..f87ae14 100644
6149--- a/arch/powerpc/include/asm/pgalloc-64.h
6150+++ b/arch/powerpc/include/asm/pgalloc-64.h
6151@@ -50,6 +50,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
6152 #ifndef CONFIG_PPC_64K_PAGES
6153
6154 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
6155+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
6156
6157 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
6158 {
6159@@ -67,6 +68,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6160 pud_set(pud, (unsigned long)pmd);
6161 }
6162
6163+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6164+{
6165+ pud_populate(mm, pud, pmd);
6166+}
6167+
6168 #define pmd_populate(mm, pmd, pte_page) \
6169 pmd_populate_kernel(mm, pmd, page_address(pte_page))
6170 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
6171@@ -76,6 +82,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6172 #else /* CONFIG_PPC_64K_PAGES */
6173
6174 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
6175+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
6176
6177 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
6178 pte_t *pte)
6179diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
6180index a9cbd3b..3b67efa 100644
6181--- a/arch/powerpc/include/asm/pgtable.h
6182+++ b/arch/powerpc/include/asm/pgtable.h
6183@@ -2,6 +2,7 @@
6184 #define _ASM_POWERPC_PGTABLE_H
6185 #ifdef __KERNEL__
6186
6187+#include <linux/const.h>
6188 #ifndef __ASSEMBLY__
6189 #include <asm/processor.h> /* For TASK_SIZE */
6190 #include <asm/mmu.h>
6191diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
6192index 4aad413..85d86bf 100644
6193--- a/arch/powerpc/include/asm/pte-hash32.h
6194+++ b/arch/powerpc/include/asm/pte-hash32.h
6195@@ -21,6 +21,7 @@
6196 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
6197 #define _PAGE_USER 0x004 /* usermode access allowed */
6198 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
6199+#define _PAGE_EXEC _PAGE_GUARDED
6200 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
6201 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
6202 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
6203diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
6204index 3d5c9dc..62f8414 100644
6205--- a/arch/powerpc/include/asm/reg.h
6206+++ b/arch/powerpc/include/asm/reg.h
6207@@ -215,6 +215,7 @@
6208 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
6209 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
6210 #define DSISR_NOHPTE 0x40000000 /* no translation found */
6211+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
6212 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
6213 #define DSISR_ISSTORE 0x02000000 /* access was a store */
6214 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
6215diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
6216index 406b7b9..af63426 100644
6217--- a/arch/powerpc/include/asm/thread_info.h
6218+++ b/arch/powerpc/include/asm/thread_info.h
6219@@ -97,7 +97,6 @@ static inline struct thread_info *current_thread_info(void)
6220 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
6221 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
6222 #define TIF_SINGLESTEP 8 /* singlestepping active */
6223-#define TIF_MEMDIE 9 /* is terminating due to OOM killer */
6224 #define TIF_SECCOMP 10 /* secure computing */
6225 #define TIF_RESTOREALL 11 /* Restore all regs (implies NOERROR) */
6226 #define TIF_NOERROR 12 /* Force successful syscall return */
6227@@ -106,6 +105,9 @@ static inline struct thread_info *current_thread_info(void)
6228 #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
6229 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
6230 for stack store? */
6231+#define TIF_MEMDIE 17 /* is terminating due to OOM killer */
6232+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
6233+#define TIF_GRSEC_SETXID 9 /* update credentials on syscall entry/exit */
6234
6235 /* as above, but as bit values */
6236 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6237@@ -124,8 +126,10 @@ static inline struct thread_info *current_thread_info(void)
6238 #define _TIF_UPROBE (1<<TIF_UPROBE)
6239 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6240 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
6241+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6242 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
6243- _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT)
6244+ _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
6245+ _TIF_GRSEC_SETXID)
6246
6247 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
6248 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
6249diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
6250index 4db4959..335e00c 100644
6251--- a/arch/powerpc/include/asm/uaccess.h
6252+++ b/arch/powerpc/include/asm/uaccess.h
6253@@ -13,6 +13,8 @@
6254 #define VERIFY_READ 0
6255 #define VERIFY_WRITE 1
6256
6257+extern void check_object_size(const void *ptr, unsigned long n, bool to);
6258+
6259 /*
6260 * The fs value determines whether argument validity checking should be
6261 * performed or not. If get_fs() == USER_DS, checking is performed, with
6262@@ -318,52 +320,6 @@ do { \
6263 extern unsigned long __copy_tofrom_user(void __user *to,
6264 const void __user *from, unsigned long size);
6265
6266-#ifndef __powerpc64__
6267-
6268-static inline unsigned long copy_from_user(void *to,
6269- const void __user *from, unsigned long n)
6270-{
6271- unsigned long over;
6272-
6273- if (access_ok(VERIFY_READ, from, n))
6274- return __copy_tofrom_user((__force void __user *)to, from, n);
6275- if ((unsigned long)from < TASK_SIZE) {
6276- over = (unsigned long)from + n - TASK_SIZE;
6277- return __copy_tofrom_user((__force void __user *)to, from,
6278- n - over) + over;
6279- }
6280- return n;
6281-}
6282-
6283-static inline unsigned long copy_to_user(void __user *to,
6284- const void *from, unsigned long n)
6285-{
6286- unsigned long over;
6287-
6288- if (access_ok(VERIFY_WRITE, to, n))
6289- return __copy_tofrom_user(to, (__force void __user *)from, n);
6290- if ((unsigned long)to < TASK_SIZE) {
6291- over = (unsigned long)to + n - TASK_SIZE;
6292- return __copy_tofrom_user(to, (__force void __user *)from,
6293- n - over) + over;
6294- }
6295- return n;
6296-}
6297-
6298-#else /* __powerpc64__ */
6299-
6300-#define __copy_in_user(to, from, size) \
6301- __copy_tofrom_user((to), (from), (size))
6302-
6303-extern unsigned long copy_from_user(void *to, const void __user *from,
6304- unsigned long n);
6305-extern unsigned long copy_to_user(void __user *to, const void *from,
6306- unsigned long n);
6307-extern unsigned long copy_in_user(void __user *to, const void __user *from,
6308- unsigned long n);
6309-
6310-#endif /* __powerpc64__ */
6311-
6312 static inline unsigned long __copy_from_user_inatomic(void *to,
6313 const void __user *from, unsigned long n)
6314 {
6315@@ -387,6 +343,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
6316 if (ret == 0)
6317 return 0;
6318 }
6319+
6320+ if (!__builtin_constant_p(n))
6321+ check_object_size(to, n, false);
6322+
6323 return __copy_tofrom_user((__force void __user *)to, from, n);
6324 }
6325
6326@@ -413,6 +373,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
6327 if (ret == 0)
6328 return 0;
6329 }
6330+
6331+ if (!__builtin_constant_p(n))
6332+ check_object_size(from, n, true);
6333+
6334 return __copy_tofrom_user(to, (__force const void __user *)from, n);
6335 }
6336
6337@@ -430,6 +394,92 @@ static inline unsigned long __copy_to_user(void __user *to,
6338 return __copy_to_user_inatomic(to, from, size);
6339 }
6340
6341+#ifndef __powerpc64__
6342+
6343+static inline unsigned long __must_check copy_from_user(void *to,
6344+ const void __user *from, unsigned long n)
6345+{
6346+ unsigned long over;
6347+
6348+ if ((long)n < 0)
6349+ return n;
6350+
6351+ if (access_ok(VERIFY_READ, from, n)) {
6352+ if (!__builtin_constant_p(n))
6353+ check_object_size(to, n, false);
6354+ return __copy_tofrom_user((__force void __user *)to, from, n);
6355+ }
6356+ if ((unsigned long)from < TASK_SIZE) {
6357+ over = (unsigned long)from + n - TASK_SIZE;
6358+ if (!__builtin_constant_p(n - over))
6359+ check_object_size(to, n - over, false);
6360+ return __copy_tofrom_user((__force void __user *)to, from,
6361+ n - over) + over;
6362+ }
6363+ return n;
6364+}
6365+
6366+static inline unsigned long __must_check copy_to_user(void __user *to,
6367+ const void *from, unsigned long n)
6368+{
6369+ unsigned long over;
6370+
6371+ if ((long)n < 0)
6372+ return n;
6373+
6374+ if (access_ok(VERIFY_WRITE, to, n)) {
6375+ if (!__builtin_constant_p(n))
6376+ check_object_size(from, n, true);
6377+ return __copy_tofrom_user(to, (__force void __user *)from, n);
6378+ }
6379+ if ((unsigned long)to < TASK_SIZE) {
6380+ over = (unsigned long)to + n - TASK_SIZE;
6381+ if (!__builtin_constant_p(n - over))
6382+ check_object_size(from, n - over, true);
6383+ return __copy_tofrom_user(to, (__force void __user *)from,
6384+ n - over) + over;
6385+ }
6386+ return n;
6387+}
6388+
6389+#else /* __powerpc64__ */
6390+
6391+#define __copy_in_user(to, from, size) \
6392+ __copy_tofrom_user((to), (from), (size))
6393+
6394+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
6395+{
6396+ if ((long)n < 0 || n > INT_MAX)
6397+ return n;
6398+
6399+ if (!__builtin_constant_p(n))
6400+ check_object_size(to, n, false);
6401+
6402+ if (likely(access_ok(VERIFY_READ, from, n)))
6403+ n = __copy_from_user(to, from, n);
6404+ else
6405+ memset(to, 0, n);
6406+ return n;
6407+}
6408+
6409+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
6410+{
6411+ if ((long)n < 0 || n > INT_MAX)
6412+ return n;
6413+
6414+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
6415+ if (!__builtin_constant_p(n))
6416+ check_object_size(from, n, true);
6417+ n = __copy_to_user(to, from, n);
6418+ }
6419+ return n;
6420+}
6421+
6422+extern unsigned long copy_in_user(void __user *to, const void __user *from,
6423+ unsigned long n);
6424+
6425+#endif /* __powerpc64__ */
6426+
6427 extern unsigned long __clear_user(void __user *addr, unsigned long size);
6428
6429 static inline unsigned long clear_user(void __user *addr, unsigned long size)
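
The rewritten copy_from_user/copy_to_user combine two checks: the (long)n < 0 guard (plus n > INT_MAX on the 64-bit side) rejects wrapped lengths, and check_object_size(), declared at the top of this hunk, is the PaX USERCOPY runtime check that the kernel buffer really spans n bytes (slab object bounds or the current stack frame). It is skipped when __builtin_constant_p(n), since constant sizes are already covered at compile time. What the runtime check catches, shown as a hypothetical driver read with the length clamp deliberately missing:

static long demo_read(char __user *ubuf, size_t len)
{
	char kbuf[64] = { 0 };

	/* without USERCOPY this leaks stack memory when len > 64; with
	 * it, check_object_size() sees kbuf is a 64-byte stack object
	 * and refuses the oversized copy */
	return copy_to_user(ubuf, kbuf, len) ? -EFAULT : 0;
}
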
6430diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
6431index 4684e33..acc4d19e 100644
6432--- a/arch/powerpc/kernel/exceptions-64e.S
6433+++ b/arch/powerpc/kernel/exceptions-64e.S
6434@@ -715,6 +715,7 @@ storage_fault_common:
6435 std r14,_DAR(r1)
6436 std r15,_DSISR(r1)
6437 addi r3,r1,STACK_FRAME_OVERHEAD
6438+ bl .save_nvgprs
6439 mr r4,r14
6440 mr r5,r15
6441 ld r14,PACA_EXGEN+EX_R14(r13)
6442@@ -723,8 +724,7 @@ storage_fault_common:
6443 cmpdi r3,0
6444 bne- 1f
6445 b .ret_from_except_lite
6446-1: bl .save_nvgprs
6447- mr r5,r3
6448+1: mr r5,r3
6449 addi r3,r1,STACK_FRAME_OVERHEAD
6450 ld r4,_DAR(r1)
6451 bl .bad_page_fault
6452diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
6453index 4665e82..080ea99 100644
6454--- a/arch/powerpc/kernel/exceptions-64s.S
6455+++ b/arch/powerpc/kernel/exceptions-64s.S
6456@@ -1206,10 +1206,10 @@ handle_page_fault:
6457 11: ld r4,_DAR(r1)
6458 ld r5,_DSISR(r1)
6459 addi r3,r1,STACK_FRAME_OVERHEAD
6460+ bl .save_nvgprs
6461 bl .do_page_fault
6462 cmpdi r3,0
6463 beq+ 12f
6464- bl .save_nvgprs
6465 mr r5,r3
6466 addi r3,r1,STACK_FRAME_OVERHEAD
6467 lwz r4,_DAR(r1)
6468diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
6469index 2e3200c..72095ce 100644
6470--- a/arch/powerpc/kernel/module_32.c
6471+++ b/arch/powerpc/kernel/module_32.c
6472@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
6473 me->arch.core_plt_section = i;
6474 }
6475 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
6476- printk("Module doesn't contain .plt or .init.plt sections.\n");
6477+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
6478 return -ENOEXEC;
6479 }
6480
6481@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
6482
6483 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
6484 /* Init, or core PLT? */
6485- if (location >= mod->module_core
6486- && location < mod->module_core + mod->core_size)
6487+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
6488+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
6489 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
6490- else
6491+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
6492+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
6493 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
6494+ else {
6495+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
6496+ return ~0UL;
6497+ }
6498
6499 /* Find this entry, or if that fails, the next avail. entry */
6500 while (entry->jump[0]) {
6501diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
6502index 8143067..21ae55b 100644
6503--- a/arch/powerpc/kernel/process.c
6504+++ b/arch/powerpc/kernel/process.c
6505@@ -680,8 +680,8 @@ void show_regs(struct pt_regs * regs)
6506 * Lookup NIP late so we have the best change of getting the
6507 * above info out without failing
6508 */
6509- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
6510- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
6511+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
6512+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
6513 #endif
6514 show_stack(current, (unsigned long *) regs->gpr[1]);
6515 if (!user_mode(regs))
6516@@ -1129,10 +1129,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6517 newsp = stack[0];
6518 ip = stack[STACK_FRAME_LR_SAVE];
6519 if (!firstframe || ip != lr) {
6520- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
6521+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
6522 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6523 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
6524- printk(" (%pS)",
6525+ printk(" (%pA)",
6526 (void *)current->ret_stack[curr_frame].ret);
6527 curr_frame--;
6528 }
6529@@ -1152,7 +1152,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6530 struct pt_regs *regs = (struct pt_regs *)
6531 (sp + STACK_FRAME_OVERHEAD);
6532 lr = regs->link;
6533- printk("--- Exception: %lx at %pS\n LR = %pS\n",
6534+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
6535 regs->trap, (void *)regs->nip, (void *)lr);
6536 firstframe = 1;
6537 }
6538@@ -1194,58 +1194,3 @@ void __ppc64_runlatch_off(void)
6539 mtspr(SPRN_CTRLT, ctrl);
6540 }
6541 #endif /* CONFIG_PPC64 */
6542-
6543-unsigned long arch_align_stack(unsigned long sp)
6544-{
6545- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6546- sp -= get_random_int() & ~PAGE_MASK;
6547- return sp & ~0xf;
6548-}
6549-
6550-static inline unsigned long brk_rnd(void)
6551-{
6552- unsigned long rnd = 0;
6553-
6554- /* 8MB for 32bit, 1GB for 64bit */
6555- if (is_32bit_task())
6556- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
6557- else
6558- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
6559-
6560- return rnd << PAGE_SHIFT;
6561-}
6562-
6563-unsigned long arch_randomize_brk(struct mm_struct *mm)
6564-{
6565- unsigned long base = mm->brk;
6566- unsigned long ret;
6567-
6568-#ifdef CONFIG_PPC_STD_MMU_64
6569- /*
6570- * If we are using 1TB segments and we are allowed to randomise
6571- * the heap, we can put it above 1TB so it is backed by a 1TB
6572- * segment. Otherwise the heap will be in the bottom 1TB
6573- * which always uses 256MB segments and this may result in a
6574- * performance penalty.
6575- */
6576- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
6577- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
6578-#endif
6579-
6580- ret = PAGE_ALIGN(base + brk_rnd());
6581-
6582- if (ret < mm->brk)
6583- return mm->brk;
6584-
6585- return ret;
6586-}
6587-
6588-unsigned long randomize_et_dyn(unsigned long base)
6589-{
6590- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
6591-
6592- if (ret < base)
6593- return base;
6594-
6595- return ret;
6596-}
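
Two independent changes here. First, %pS becomes %pA in the register and stack dumps; %pA is a printk extension added elsewhere in this patch (the vsprintf side is outside this excerpt), and the intent, as used on these oops paths, is symbol-resolving output that cooperates with GRKERNSEC_HIDESYM so raw kernel addresses are not leaked. Second, the arch-local arch_align_stack(), brk_rnd(), arch_randomize_brk() and randomize_et_dyn() helpers are deleted outright: with PaX RANDMMAP/RANDUSTACK supplying the randomization policy, the per-arch hooks collapse to deterministic behavior. The replacement shape is visible in the asm/exec.h hunks for s390 and score later in this patch:

/* Replacement shape (cf. the s390/score asm/exec.h hunks below):
 * no per-arch entropy, just the ABI-mandated 16-byte alignment. */
#define arch_align_stack(sp) ((sp) & ~0xfUL)
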
6597diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
6598index c497000..8fde506 100644
6599--- a/arch/powerpc/kernel/ptrace.c
6600+++ b/arch/powerpc/kernel/ptrace.c
6601@@ -1737,6 +1737,10 @@ long arch_ptrace(struct task_struct *child, long request,
6602 return ret;
6603 }
6604
6605+#ifdef CONFIG_GRKERNSEC_SETXID
6606+extern void gr_delayed_cred_worker(void);
6607+#endif
6608+
6609 /*
6610 * We must return the syscall number to actually look up in the table.
6611 * This can be -1L to skip running any syscall at all.
6612@@ -1747,6 +1751,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
6613
6614 secure_computing_strict(regs->gpr[0]);
6615
6616+#ifdef CONFIG_GRKERNSEC_SETXID
6617+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6618+ gr_delayed_cred_worker();
6619+#endif
6620+
6621 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
6622 tracehook_report_syscall_entry(regs))
6623 /*
6624@@ -1781,6 +1790,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
6625 {
6626 int step;
6627
6628+#ifdef CONFIG_GRKERNSEC_SETXID
6629+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6630+ gr_delayed_cred_worker();
6631+#endif
6632+
6633 audit_syscall_exit(regs);
6634
6635 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
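
The GRKERNSEC_SETXID hooks added at syscall entry and exit implement delayed credential propagation: when one thread changes its uid/gid, grsecurity flags every thread in the group with TIF_GRSEC_SETXID, and each thread applies the pending credentials at its next syscall boundary via gr_delayed_cred_worker() (defined in grsecurity/, outside this excerpt). The same three-line pattern recurs verbatim in the sparc ptrace hunks below; as a hypothetical shared helper it would read:

/* Hypothetical consolidation of the pattern this patch open-codes
 * in each arch's syscall trace path; the extern matches the
 * declaration the patch adds above arch_ptrace(). */
static inline void gr_check_delayed_cred(void)
{
#ifdef CONFIG_GRKERNSEC_SETXID
	if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
		gr_delayed_cred_worker();
#endif
}
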
6636diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
6637index 804e323..79181c1 100644
6638--- a/arch/powerpc/kernel/signal_32.c
6639+++ b/arch/powerpc/kernel/signal_32.c
6640@@ -851,7 +851,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
6641 /* Save user registers on the stack */
6642 frame = &rt_sf->uc.uc_mcontext;
6643 addr = frame;
6644- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
6645+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
6646 if (save_user_regs(regs, frame, 0, 1))
6647 goto badframe;
6648 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
6649diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
6650index 1ca045d..139c3f7 100644
6651--- a/arch/powerpc/kernel/signal_64.c
6652+++ b/arch/powerpc/kernel/signal_64.c
6653@@ -430,7 +430,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
6654 current->thread.fpscr.val = 0;
6655
6656 /* Set up to return from userspace. */
6657- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
6658+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
6659 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
6660 } else {
6661 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
6662diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
6663index 3ce1f86..c30e629 100644
6664--- a/arch/powerpc/kernel/sysfs.c
6665+++ b/arch/powerpc/kernel/sysfs.c
6666@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
6667 return NOTIFY_OK;
6668 }
6669
6670-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
6671+static struct notifier_block sysfs_cpu_nb = {
6672 .notifier_call = sysfs_cpu_notify,
6673 };
6674
6675diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
6676index 3251840..3f7c77a 100644
6677--- a/arch/powerpc/kernel/traps.c
6678+++ b/arch/powerpc/kernel/traps.c
6679@@ -133,6 +133,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
6680 return flags;
6681 }
6682
6683+extern void gr_handle_kernel_exploit(void);
6684+
6685 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
6686 int signr)
6687 {
6688@@ -182,6 +184,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
6689 panic("Fatal exception in interrupt");
6690 if (panic_on_oops)
6691 panic("Fatal exception");
6692+
6693+ gr_handle_kernel_exploit();
6694+
6695 do_exit(signr);
6696 }
6697
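
oops_end() gains a call into the grsecurity core once the oops is known to be fatal for the current task. The placement matters: after the panic checks (so panic_on_oops still wins) and before do_exit(), while the offending task is still current so the handler can attribute the crash. Only the hook point is part of this arch diff:

/* Declared here, implemented in grsecurity/ (not shown in this
 * excerpt): invoked from every arch's fatal-oops path so a
 * userland-triggered kernel crash can be treated as a possible
 * exploit attempt. */
extern void gr_handle_kernel_exploit(void);
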
6698diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
6699index 1b2076f..835e4be 100644
6700--- a/arch/powerpc/kernel/vdso.c
6701+++ b/arch/powerpc/kernel/vdso.c
6702@@ -34,6 +34,7 @@
6703 #include <asm/firmware.h>
6704 #include <asm/vdso.h>
6705 #include <asm/vdso_datapage.h>
6706+#include <asm/mman.h>
6707
6708 #include "setup.h"
6709
6710@@ -218,7 +219,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
6711 vdso_base = VDSO32_MBASE;
6712 #endif
6713
6714- current->mm->context.vdso_base = 0;
6715+ current->mm->context.vdso_base = ~0UL;
6716
6717 /* vDSO has a problem and was disabled, just don't "enable" it for the
6718 * process
6719@@ -238,7 +239,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
6720 vdso_base = get_unmapped_area(NULL, vdso_base,
6721 (vdso_pages << PAGE_SHIFT) +
6722 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
6723- 0, 0);
6724+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
6725 if (IS_ERR_VALUE(vdso_base)) {
6726 rc = vdso_base;
6727 goto fail_mmapsem;
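
Together with the signal_32.c/signal_64.c hunks above, this flips the "no vDSO" sentinel from 0 to ~0UL. 0 is a legitimate, if unusual, mapping address, so testing vdso_base for truth can misfire; ~0UL can never be the base of a page-aligned mapping. The get_unmapped_area() call also now passes MAP_PRIVATE | MAP_EXECUTABLE instead of 0, so the PaX-aware allocator knows it is siting executable pages. A sketch of the convention, with a hypothetical macro name:

/* Hypothetical spelling of the sentinel convention this patch
 * adopts for mm->context.vdso_base. */
#define VDSO_NOT_MAPPED (~0UL)

static inline int vdso_mapped(unsigned long vdso_base)
{
	return vdso_base != VDSO_NOT_MAPPED;
}
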
6728diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
6729index 5eea6f3..5d10396 100644
6730--- a/arch/powerpc/lib/usercopy_64.c
6731+++ b/arch/powerpc/lib/usercopy_64.c
6732@@ -9,22 +9,6 @@
6733 #include <linux/module.h>
6734 #include <asm/uaccess.h>
6735
6736-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
6737-{
6738- if (likely(access_ok(VERIFY_READ, from, n)))
6739- n = __copy_from_user(to, from, n);
6740- else
6741- memset(to, 0, n);
6742- return n;
6743-}
6744-
6745-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
6746-{
6747- if (likely(access_ok(VERIFY_WRITE, to, n)))
6748- n = __copy_to_user(to, from, n);
6749- return n;
6750-}
6751-
6752 unsigned long copy_in_user(void __user *to, const void __user *from,
6753 unsigned long n)
6754 {
6755@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
6756 return n;
6757 }
6758
6759-EXPORT_SYMBOL(copy_from_user);
6760-EXPORT_SYMBOL(copy_to_user);
6761 EXPORT_SYMBOL(copy_in_user);
6762
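
The out-of-line powerpc copy_from_user()/copy_to_user() definitions and their exports are deleted. Presumably the asm/uaccess.h side of this patch, which is outside this excerpt, re-creates them as inline wrappers so the added sanity checks inline into every caller; the s390 and sparc hunks below show what those wrappers look like. A sketch of the likely replacement, combining the deleted body with the sign-check used for the other arches:

/* Presumed inline replacement (assumption: mirrors the s390/sparc
 * wrappers later in this patch; the powerpc header hunk is not part
 * of this excerpt). */
static inline unsigned long
copy_from_user(void *to, const void __user *from, unsigned long n)
{
	if ((long)n < 0)
		return n;
	if (likely(access_ok(VERIFY_READ, from, n)))
		n = __copy_from_user(to, from, n);
	else
		memset(to, 0, n);	/* don't leak stale kernel memory */
	return n;
}
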
6763diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
6764index 3a8489a..6a63b3b 100644
6765--- a/arch/powerpc/mm/fault.c
6766+++ b/arch/powerpc/mm/fault.c
6767@@ -32,6 +32,10 @@
6768 #include <linux/perf_event.h>
6769 #include <linux/magic.h>
6770 #include <linux/ratelimit.h>
6771+#include <linux/slab.h>
6772+#include <linux/pagemap.h>
6773+#include <linux/compiler.h>
6774+#include <linux/unistd.h>
6775
6776 #include <asm/firmware.h>
6777 #include <asm/page.h>
6778@@ -68,6 +72,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
6779 }
6780 #endif
6781
6782+#ifdef CONFIG_PAX_PAGEEXEC
6783+/*
6784+ * PaX: decide what to do with offenders (regs->nip = fault address)
6785+ *
6786+ * returns 1 when task should be killed
6787+ */
6788+static int pax_handle_fetch_fault(struct pt_regs *regs)
6789+{
6790+ return 1;
6791+}
6792+
6793+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6794+{
6795+ unsigned long i;
6796+
6797+ printk(KERN_ERR "PAX: bytes at PC: ");
6798+ for (i = 0; i < 5; i++) {
6799+ unsigned int c;
6800+ if (get_user(c, (unsigned int __user *)pc+i))
6801+ printk(KERN_CONT "???????? ");
6802+ else
6803+ printk(KERN_CONT "%08x ", c);
6804+ }
6805+ printk("\n");
6806+}
6807+#endif
6808+
6809 /*
6810 * Check whether the instruction at regs->nip is a store using
6811 * an update addressing form which will update r1.
6812@@ -213,7 +244,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
6813 * indicate errors in DSISR but can validly be set in SRR1.
6814 */
6815 if (trap == 0x400)
6816- error_code &= 0x48200000;
6817+ error_code &= 0x58200000;
6818 else
6819 is_write = error_code & DSISR_ISSTORE;
6820 #else
6821@@ -364,7 +395,7 @@ good_area:
6822 * "undefined". Of those that can be set, this is the only
6823 * one which seems bad.
6824 */
6825- if (error_code & 0x10000000)
6826+ if (error_code & DSISR_GUARDED)
6827 /* Guarded storage error. */
6828 goto bad_area;
6829 #endif /* CONFIG_8xx */
6830@@ -379,7 +410,7 @@ good_area:
6831 * processors use the same I/D cache coherency mechanism
6832 * as embedded.
6833 */
6834- if (error_code & DSISR_PROTFAULT)
6835+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
6836 goto bad_area;
6837 #endif /* CONFIG_PPC_STD_MMU */
6838
6839@@ -462,6 +493,23 @@ bad_area:
6840 bad_area_nosemaphore:
6841 /* User mode accesses cause a SIGSEGV */
6842 if (user_mode(regs)) {
6843+
6844+#ifdef CONFIG_PAX_PAGEEXEC
6845+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
6846+#ifdef CONFIG_PPC_STD_MMU
6847+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
6848+#else
6849+ if (is_exec && regs->nip == address) {
6850+#endif
6851+ switch (pax_handle_fetch_fault(regs)) {
6852+ }
6853+
6854+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
6855+ do_group_exit(SIGKILL);
6856+ }
6857+ }
6858+#endif
6859+
6860 _exception(SIGSEGV, regs, code, address);
6861 return 0;
6862 }
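
Three related changes support PAGEEXEC on powerpc. The SRR1 mask for instruction faults widens from 0x48200000 to 0x58200000, which additionally preserves bit 0x10000000, the bit the later hunk names DSISR_GUARDED: a fetch from a guarded (no-execute) page must stay visible to the new handler rather than being masked away. The protection-fault test is widened the same way, and the bad_area path gains the PaX reporting and kill logic; pax_handle_fetch_fault() returns 1 unconditionally here because powerpc has no trampoline emulation cases to whitelist. The mask relationship, spelled out:

/* 0x58200000 == 0x48200000 | DSISR_GUARDED: the widened mask keeps
 * execute-protection faults visible.  DSISR_GUARDED is assumed to
 * be 0x10000000, matching the bare constant the later hunk replaces
 * with the named macro. */
#define DSISR_GUARDED 0x10000000UL
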
6863diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
6864index 67a42ed..cd463e0 100644
6865--- a/arch/powerpc/mm/mmap_64.c
6866+++ b/arch/powerpc/mm/mmap_64.c
6867@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
6868 {
6869 unsigned long rnd = 0;
6870
6871+#ifdef CONFIG_PAX_RANDMMAP
6872+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
6873+#endif
6874+
6875 if (current->flags & PF_RANDOMIZE) {
6876 /* 8MB for 32bit, 1GB for 64bit */
6877 if (is_32bit_task())
6878@@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
6879 */
6880 if (mmap_is_legacy()) {
6881 mm->mmap_base = TASK_UNMAPPED_BASE;
6882+
6883+#ifdef CONFIG_PAX_RANDMMAP
6884+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6885+ mm->mmap_base += mm->delta_mmap;
6886+#endif
6887+
6888 mm->get_unmapped_area = arch_get_unmapped_area;
6889 mm->unmap_area = arch_unmap_area;
6890 } else {
6891 mm->mmap_base = mmap_base();
6892+
6893+#ifdef CONFIG_PAX_RANDMMAP
6894+ if (mm->pax_flags & MF_PAX_RANDMMAP)
6895+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
6896+#endif
6897+
6898 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
6899 mm->unmap_area = arch_unmap_area_topdown;
6900 }
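
This is the canonical RANDMMAP base adjustment, repeated for s390 and the other arches later in the patch: when the mm is marked MF_PAX_RANDMMAP, legacy bottom-up layouts move the search base up by a per-exec random delta, while top-down layouts move it down, also subtracting delta_stack to stay clear of the randomized stack region above. The bare if inserted at the top of mmap_rnd() makes the following PF_RANDOMIZE block its body, so PaX-managed processes skip the kernel's own entropy in favor of the deltas. As a pure function:

/* Model of the per-exec base adjustment; delta_mmap/delta_stack are
 * the PaX-chosen random deltas this patch stores in mm_struct. */
static unsigned long randmmap_base(unsigned long base, int legacy,
				   unsigned long delta_mmap,
				   unsigned long delta_stack)
{
	return legacy ? base + delta_mmap
		      : base - (delta_mmap + delta_stack);
}
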
6901diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
6902index e779642..e5bb889 100644
6903--- a/arch/powerpc/mm/mmu_context_nohash.c
6904+++ b/arch/powerpc/mm/mmu_context_nohash.c
6905@@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
6906 return NOTIFY_OK;
6907 }
6908
6909-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
6910+static struct notifier_block mmu_context_cpu_nb = {
6911 .notifier_call = mmu_context_cpu_notify,
6912 };
6913
6914diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
6915index bba87ca..c346a33 100644
6916--- a/arch/powerpc/mm/numa.c
6917+++ b/arch/powerpc/mm/numa.c
6918@@ -932,7 +932,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
6919 return ret;
6920 }
6921
6922-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
6923+static struct notifier_block ppc64_numa_nb = {
6924 .notifier_call = cpu_numa_callback,
6925 .priority = 1 /* Must run before sched domains notifier. */
6926 };
6927diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
6928index cf9dada..241529f 100644
6929--- a/arch/powerpc/mm/slice.c
6930+++ b/arch/powerpc/mm/slice.c
6931@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
6932 if ((mm->task_size - len) < addr)
6933 return 0;
6934 vma = find_vma(mm, addr);
6935- return (!vma || (addr + len) <= vma->vm_start);
6936+ return check_heap_stack_gap(vma, addr, len, 0);
6937 }
6938
6939 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
6940@@ -272,7 +272,7 @@ full_search:
6941 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
6942 continue;
6943 }
6944- if (!vma || addr + len <= vma->vm_start) {
6945+ if (check_heap_stack_gap(vma, addr, len, 0)) {
6946 /*
6947 * Remember the place where we stopped the search:
6948 */
6949@@ -329,10 +329,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
6950 }
6951 }
6952
6953- addr = mm->mmap_base;
6954- while (addr > len) {
6955+ if (mm->mmap_base < len)
6956+ addr = -ENOMEM;
6957+ else
6958+ addr = mm->mmap_base - len;
6959+
6960+ while (!IS_ERR_VALUE(addr)) {
6961 /* Go down by chunk size */
6962- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
6963+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
6964
6965 /* Check for hit with different page size */
6966 mask = slice_range_to_mask(addr, len);
6967@@ -352,7 +356,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
6968 * return with success:
6969 */
6970 vma = find_vma(mm, addr);
6971- if (!vma || (addr + len) <= vma->vm_start) {
6972+ if (check_heap_stack_gap(vma, addr, len, 0)) {
6973 /* remember the address as a hint for next time */
6974 if (use_cache)
6975 mm->free_area_cache = addr;
6976@@ -364,7 +368,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
6977 mm->cached_hole_size = vma->vm_start - addr;
6978
6979 /* try just below the current vma->vm_start */
6980- addr = vma->vm_start;
6981+ addr = skip_heap_stack_gap(vma, len, 0);
6982 }
6983
6984 /*
6985@@ -442,6 +446,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
6986 if (fixed && addr > (mm->task_size - len))
6987 return -EINVAL;
6988
6989+#ifdef CONFIG_PAX_RANDMMAP
6990+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
6991+ addr = 0;
6992+#endif
6993+
6994 /* If hint, make sure it matches our alignment restrictions */
6995 if (!fixed && addr) {
6996 addr = _ALIGN_UP(addr, 1ul << pshift);
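
Every open-coded "!vma || addr + len <= vma->vm_start" test in the slice allocator is routed through check_heap_stack_gap(), which this patch adds to the core MM; its fourth argument is a per-mapping random offset, passed as 0 here. The point is to stop treating "ends exactly at the next VMA" as acceptable when that VMA is a downward-growing stack, and skip_heap_stack_gap() likewise replaces the bare "addr = vma->vm_start" so the next candidate steps below the stack plus its reserved gap. The top-down walk is also restructured so addr itself carries the candidate, with an error value as the loop sentinel, instead of being decremented past zero. A userspace model of the gap test, with invented field names:

/* Model of check_heap_stack_gap() semantics (field names invented;
 * the real gap size is a grsecurity tunable, not shown here). */
struct vma_model { unsigned long vm_start; int grows_down; };

static int gap_ok(const struct vma_model *vma, unsigned long addr,
		  unsigned long len, unsigned long gap)
{
	if (!vma)
		return 1;
	if (vma->grows_down)
		return addr + len + gap <= vma->vm_start;
	return addr + len <= vma->vm_start;
}
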
6997diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
6998index bdb738a..49c9f95 100644
6999--- a/arch/powerpc/platforms/powermac/smp.c
7000+++ b/arch/powerpc/platforms/powermac/smp.c
7001@@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
7002 return NOTIFY_OK;
7003 }
7004
7005-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
7006+static struct notifier_block smp_core99_cpu_nb = {
7007 .notifier_call = smp_core99_cpu_notify,
7008 };
7009 #endif /* CONFIG_HOTPLUG_CPU */
7010diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
7011index c797832..ce575c8 100644
7012--- a/arch/s390/include/asm/atomic.h
7013+++ b/arch/s390/include/asm/atomic.h
7014@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
7015 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
7016 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7017
7018+#define atomic64_read_unchecked(v) atomic64_read(v)
7019+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7020+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7021+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7022+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7023+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7024+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7025+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7026+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7027+
7028 #define smp_mb__before_atomic_dec() smp_mb()
7029 #define smp_mb__after_atomic_dec() smp_mb()
7030 #define smp_mb__before_atomic_inc() smp_mb()
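
s390 is not instrumented for PAX_REFCOUNT, so the whole *_unchecked API is mapped straight back to the plain operations; the distinction only bites on arches like sparc64 below, where the checked versions trap on signed overflow. The opt-out type itself:

/* Added by PAX_REFCOUNT in include/linux/types.h (outside this
 * excerpt): layout-identical to atomic_t, but exempt from overflow
 * trapping, for counters that are allowed to wrap, such as
 * statistics and cookies. */
typedef struct {
	int counter;
} atomic_unchecked_t;
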
7031diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
7032index 4d7ccac..d03d0ad 100644
7033--- a/arch/s390/include/asm/cache.h
7034+++ b/arch/s390/include/asm/cache.h
7035@@ -9,8 +9,10 @@
7036 #ifndef __ARCH_S390_CACHE_H
7037 #define __ARCH_S390_CACHE_H
7038
7039-#define L1_CACHE_BYTES 256
7040+#include <linux/const.h>
7041+
7042 #define L1_CACHE_SHIFT 8
7043+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7044 #define NET_SKB_PAD 32
7045
7046 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
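
The recurring cache.h change (here, and for score, sh and sparc below) derives L1_CACHE_BYTES from L1_CACHE_SHIFT through _AC(), so the constant carries a UL suffix in C but stays a bare literal when the header is pulled into assembly, and so the two macros cannot drift apart. What <linux/const.h> provides, abridged:

/* Abridged from include/linux/const.h: the suffix is pasted on only
 * for C, because the assembler does not understand 1UL. */
#ifdef __ASSEMBLY__
#define _AC(X, Y)  X
#else
#define __AC(X, Y) (X##Y)
#define _AC(X, Y)  __AC(X, Y)
#endif

#define L1_CACHE_SHIFT 8
#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)	/* 256 */
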
7047diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
7048index 178ff96..8c93bd1 100644
7049--- a/arch/s390/include/asm/elf.h
7050+++ b/arch/s390/include/asm/elf.h
7051@@ -160,8 +160,14 @@ extern unsigned int vdso_enabled;
7052 the loader. We need to make sure that it is out of the way of the program
7053 that it will "exec", and that there is sufficient room for the brk. */
7054
7055-extern unsigned long randomize_et_dyn(unsigned long base);
7056-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
7057+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
7058+
7059+#ifdef CONFIG_PAX_ASLR
7060+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
7061+
7062+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7063+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7064+#endif
7065
7066 /* This yields a mask that user programs can use to figure out what
7067 instruction set this CPU supports. */
7068@@ -210,9 +216,6 @@ struct linux_binprm;
7069 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
7070 int arch_setup_additional_pages(struct linux_binprm *, int);
7071
7072-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7073-#define arch_randomize_brk arch_randomize_brk
7074-
7075 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
7076
7077 #endif
7078diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
7079index c4a93d6..4d2a9b4 100644
7080--- a/arch/s390/include/asm/exec.h
7081+++ b/arch/s390/include/asm/exec.h
7082@@ -7,6 +7,6 @@
7083 #ifndef __ASM_EXEC_H
7084 #define __ASM_EXEC_H
7085
7086-extern unsigned long arch_align_stack(unsigned long sp);
7087+#define arch_align_stack(x) ((x) & ~0xfUL)
7088
7089 #endif /* __ASM_EXEC_H */
7090diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
7091index 34268df..ea97318 100644
7092--- a/arch/s390/include/asm/uaccess.h
7093+++ b/arch/s390/include/asm/uaccess.h
7094@@ -252,6 +252,10 @@ static inline unsigned long __must_check
7095 copy_to_user(void __user *to, const void *from, unsigned long n)
7096 {
7097 might_fault();
7098+
7099+ if ((long)n < 0)
7100+ return n;
7101+
7102 if (access_ok(VERIFY_WRITE, to, n))
7103 n = __copy_to_user(to, from, n);
7104 return n;
7105@@ -277,6 +281,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
7106 static inline unsigned long __must_check
7107 __copy_from_user(void *to, const void __user *from, unsigned long n)
7108 {
7109+ if ((long)n < 0)
7110+ return n;
7111+
7112 if (__builtin_constant_p(n) && (n <= 256))
7113 return uaccess.copy_from_user_small(n, from, to);
7114 else
7115@@ -308,10 +315,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
7116 static inline unsigned long __must_check
7117 copy_from_user(void *to, const void __user *from, unsigned long n)
7118 {
7119- unsigned int sz = __compiletime_object_size(to);
7120+ size_t sz = __compiletime_object_size(to);
7121
7122 might_fault();
7123- if (unlikely(sz != -1 && sz < n)) {
7124+
7125+ if ((long)n < 0)
7126+ return n;
7127+
7128+ if (unlikely(sz != (size_t)-1 && sz < n)) {
7129 copy_from_user_overflow();
7130 return n;
7131 }
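
The "(long)n < 0" guard added to each copy routine treats the sign bit of the length as poison: a negative ssize_t produced by an unchecked subtraction becomes a huge unsigned count, and it is rejected before access_ok() or the low-level copier ever see it, with the whole length reported back as uncopied, the kernel convention for "nothing copied". The copy_from_user() hunk also widens the __compiletime_object_size() result from unsigned int to size_t so the "unknown" sentinel (size_t)-1 compares correctly on 64-bit. A compilable userspace model of the guard:

#include <stdio.h>

/* Userspace model of the "(long)n < 0" guard; the elided middle is
 * where access_ok() and __copy_user() run in the kernel version. */
static unsigned long guarded_copy(unsigned long n)
{
	if ((long)n < 0)
		return n;	/* refuse: report all bytes uncopied */
	return 0;		/* pretend the copy succeeded */
}

int main(void)
{
	long broken = -1;	/* e.g. len = end - start gone negative */
	printf("uncopied: %lu\n", guarded_copy((unsigned long)broken));
	return 0;
}
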
7132diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
7133index 4610dea..cf0af21 100644
7134--- a/arch/s390/kernel/module.c
7135+++ b/arch/s390/kernel/module.c
7136@@ -171,11 +171,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
7137
7138 /* Increase core size by size of got & plt and set start
7139 offsets for got and plt. */
7140- me->core_size = ALIGN(me->core_size, 4);
7141- me->arch.got_offset = me->core_size;
7142- me->core_size += me->arch.got_size;
7143- me->arch.plt_offset = me->core_size;
7144- me->core_size += me->arch.plt_size;
7145+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
7146+ me->arch.got_offset = me->core_size_rw;
7147+ me->core_size_rw += me->arch.got_size;
7148+ me->arch.plt_offset = me->core_size_rx;
7149+ me->core_size_rx += me->arch.plt_size;
7150 return 0;
7151 }
7152
7153@@ -252,7 +252,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7154 if (info->got_initialized == 0) {
7155 Elf_Addr *gotent;
7156
7157- gotent = me->module_core + me->arch.got_offset +
7158+ gotent = me->module_core_rw + me->arch.got_offset +
7159 info->got_offset;
7160 *gotent = val;
7161 info->got_initialized = 1;
7162@@ -276,7 +276,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7163 else if (r_type == R_390_GOTENT ||
7164 r_type == R_390_GOTPLTENT)
7165 *(unsigned int *) loc =
7166- (val + (Elf_Addr) me->module_core - loc) >> 1;
7167+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
7168 else if (r_type == R_390_GOT64 ||
7169 r_type == R_390_GOTPLT64)
7170 *(unsigned long *) loc = val;
7171@@ -290,7 +290,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7172 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
7173 if (info->plt_initialized == 0) {
7174 unsigned int *ip;
7175- ip = me->module_core + me->arch.plt_offset +
7176+ ip = me->module_core_rx + me->arch.plt_offset +
7177 info->plt_offset;
7178 #ifndef CONFIG_64BIT
7179 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
7180@@ -315,7 +315,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7181 val - loc + 0xffffUL < 0x1ffffeUL) ||
7182 (r_type == R_390_PLT32DBL &&
7183 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
7184- val = (Elf_Addr) me->module_core +
7185+ val = (Elf_Addr) me->module_core_rx +
7186 me->arch.plt_offset +
7187 info->plt_offset;
7188 val += rela->r_addend - loc;
7189@@ -337,7 +337,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7190 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
7191 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
7192 val = val + rela->r_addend -
7193- ((Elf_Addr) me->module_core + me->arch.got_offset);
7194+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
7195 if (r_type == R_390_GOTOFF16)
7196 *(unsigned short *) loc = val;
7197 else if (r_type == R_390_GOTOFF32)
7198@@ -347,7 +347,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7199 break;
7200 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
7201 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
7202- val = (Elf_Addr) me->module_core + me->arch.got_offset +
7203+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
7204 rela->r_addend - loc;
7205 if (r_type == R_390_GOTPC)
7206 *(unsigned int *) loc = val;
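
With the module image split, the s390 loader has to account each region to the correct half: the GOT holds runtime-written pointers and lands in the RW image, while the PLT holds executable trampolines and lands in the RX image, and every later address computation in apply_rela() picks module_core_rw or module_core_rx accordingly. The bookkeeping pattern, as a model:

/* Model of the split accounting in module_frob_arch_sections():
 * reserve a region in one of the two images, return its offset. */
struct mod_sizes { unsigned long rx, rw; };

static unsigned long reserve(unsigned long *img_size, unsigned long bytes)
{
	unsigned long off = *img_size;	/* offset of the new region */
	*img_size += bytes;
	return off;
}
/* usage: got_offset = reserve(&s.rw, got_size);
 *        plt_offset = reserve(&s.rx, plt_size); */
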
7207diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
7208index 536d645..4a5bd9e 100644
7209--- a/arch/s390/kernel/process.c
7210+++ b/arch/s390/kernel/process.c
7211@@ -250,39 +250,3 @@ unsigned long get_wchan(struct task_struct *p)
7212 }
7213 return 0;
7214 }
7215-
7216-unsigned long arch_align_stack(unsigned long sp)
7217-{
7218- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7219- sp -= get_random_int() & ~PAGE_MASK;
7220- return sp & ~0xf;
7221-}
7222-
7223-static inline unsigned long brk_rnd(void)
7224-{
7225- /* 8MB for 32bit, 1GB for 64bit */
7226- if (is_32bit_task())
7227- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
7228- else
7229- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
7230-}
7231-
7232-unsigned long arch_randomize_brk(struct mm_struct *mm)
7233-{
7234- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
7235-
7236- if (ret < mm->brk)
7237- return mm->brk;
7238- return ret;
7239-}
7240-
7241-unsigned long randomize_et_dyn(unsigned long base)
7242-{
7243- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7244-
7245- if (!(current->flags & PF_RANDOMIZE))
7246- return base;
7247- if (ret < base)
7248- return base;
7249- return ret;
7250-}
7251diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
7252index c59a5ef..3fae59c 100644
7253--- a/arch/s390/mm/mmap.c
7254+++ b/arch/s390/mm/mmap.c
7255@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7256 */
7257 if (mmap_is_legacy()) {
7258 mm->mmap_base = TASK_UNMAPPED_BASE;
7259+
7260+#ifdef CONFIG_PAX_RANDMMAP
7261+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7262+ mm->mmap_base += mm->delta_mmap;
7263+#endif
7264+
7265 mm->get_unmapped_area = arch_get_unmapped_area;
7266 mm->unmap_area = arch_unmap_area;
7267 } else {
7268 mm->mmap_base = mmap_base();
7269+
7270+#ifdef CONFIG_PAX_RANDMMAP
7271+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7272+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7273+#endif
7274+
7275 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7276 mm->unmap_area = arch_unmap_area_topdown;
7277 }
7278@@ -172,10 +184,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7279 */
7280 if (mmap_is_legacy()) {
7281 mm->mmap_base = TASK_UNMAPPED_BASE;
7282+
7283+#ifdef CONFIG_PAX_RANDMMAP
7284+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7285+ mm->mmap_base += mm->delta_mmap;
7286+#endif
7287+
7288 mm->get_unmapped_area = s390_get_unmapped_area;
7289 mm->unmap_area = arch_unmap_area;
7290 } else {
7291 mm->mmap_base = mmap_base();
7292+
7293+#ifdef CONFIG_PAX_RANDMMAP
7294+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7295+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7296+#endif
7297+
7298 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
7299 mm->unmap_area = arch_unmap_area_topdown;
7300 }
7301diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
7302index ae3d59f..f65f075 100644
7303--- a/arch/score/include/asm/cache.h
7304+++ b/arch/score/include/asm/cache.h
7305@@ -1,7 +1,9 @@
7306 #ifndef _ASM_SCORE_CACHE_H
7307 #define _ASM_SCORE_CACHE_H
7308
7309+#include <linux/const.h>
7310+
7311 #define L1_CACHE_SHIFT 4
7312-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7313+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7314
7315 #endif /* _ASM_SCORE_CACHE_H */
7316diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
7317index f9f3cd5..58ff438 100644
7318--- a/arch/score/include/asm/exec.h
7319+++ b/arch/score/include/asm/exec.h
7320@@ -1,6 +1,6 @@
7321 #ifndef _ASM_SCORE_EXEC_H
7322 #define _ASM_SCORE_EXEC_H
7323
7324-extern unsigned long arch_align_stack(unsigned long sp);
7325+#define arch_align_stack(x) (x)
7326
7327 #endif /* _ASM_SCORE_EXEC_H */
7328diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
7329index 7956846..5f37677 100644
7330--- a/arch/score/kernel/process.c
7331+++ b/arch/score/kernel/process.c
7332@@ -134,8 +134,3 @@ unsigned long get_wchan(struct task_struct *task)
7333
7334 return task_pt_regs(task)->cp0_epc;
7335 }
7336-
7337-unsigned long arch_align_stack(unsigned long sp)
7338-{
7339- return sp;
7340-}
7341diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
7342index ef9e555..331bd29 100644
7343--- a/arch/sh/include/asm/cache.h
7344+++ b/arch/sh/include/asm/cache.h
7345@@ -9,10 +9,11 @@
7346 #define __ASM_SH_CACHE_H
7347 #ifdef __KERNEL__
7348
7349+#include <linux/const.h>
7350 #include <linux/init.h>
7351 #include <cpu/cache.h>
7352
7353-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7354+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7355
7356 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7357
7358diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7359index 03f2b55..b027032 100644
7360--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7361+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7362@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
7363 return NOTIFY_OK;
7364 }
7365
7366-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
7367+static struct notifier_block shx3_cpu_notifier = {
7368 .notifier_call = shx3_cpu_callback,
7369 };
7370
7371diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
7372index 6777177..cb5e44f 100644
7373--- a/arch/sh/mm/mmap.c
7374+++ b/arch/sh/mm/mmap.c
7375@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7376 struct mm_struct *mm = current->mm;
7377 struct vm_area_struct *vma;
7378 int do_colour_align;
7379+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7380 struct vm_unmapped_area_info info;
7381
7382 if (flags & MAP_FIXED) {
7383@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7384 if (filp || (flags & MAP_SHARED))
7385 do_colour_align = 1;
7386
7387+#ifdef CONFIG_PAX_RANDMMAP
7388+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7389+#endif
7390+
7391 if (addr) {
7392 if (do_colour_align)
7393 addr = COLOUR_ALIGN(addr, pgoff);
7394@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7395 addr = PAGE_ALIGN(addr);
7396
7397 vma = find_vma(mm, addr);
7398- if (TASK_SIZE - len >= addr &&
7399- (!vma || addr + len <= vma->vm_start))
7400+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7401 return addr;
7402 }
7403
7404 info.flags = 0;
7405 info.length = len;
7406- info.low_limit = TASK_UNMAPPED_BASE;
7407+ info.low_limit = mm->mmap_base;
7408 info.high_limit = TASK_SIZE;
7409 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
7410 info.align_offset = pgoff << PAGE_SHIFT;
7411@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7412 struct mm_struct *mm = current->mm;
7413 unsigned long addr = addr0;
7414 int do_colour_align;
7415+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7416 struct vm_unmapped_area_info info;
7417
7418 if (flags & MAP_FIXED) {
7419@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7420 if (filp || (flags & MAP_SHARED))
7421 do_colour_align = 1;
7422
7423+#ifdef CONFIG_PAX_RANDMMAP
7424+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7425+#endif
7426+
7427 /* requesting a specific address */
7428 if (addr) {
7429 if (do_colour_align)
7430@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7431 addr = PAGE_ALIGN(addr);
7432
7433 vma = find_vma(mm, addr);
7434- if (TASK_SIZE - len >= addr &&
7435- (!vma || addr + len <= vma->vm_start))
7436+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7437 return addr;
7438 }
7439
7440@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7441 VM_BUG_ON(addr != -ENOMEM);
7442 info.flags = 0;
7443 info.low_limit = TASK_UNMAPPED_BASE;
7444+
7445+#ifdef CONFIG_PAX_RANDMMAP
7446+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7447+ info.low_limit += mm->delta_mmap;
7448+#endif
7449+
7450 info.high_limit = TASK_SIZE;
7451 addr = vm_unmapped_area(&info);
7452 }
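
A pattern worth spelling out, since it recurs across the mmap hunks in this patch: the inserted "#ifdef CONFIG_PAX_RANDMMAP" guard is a bare if with no braces, so the pre-existing "if (addr) { ... }" hint-handling block becomes its body. The effect is that when RANDMMAP is active, non-MAP_FIXED address hints are ignored and the randomized search always runs. Note also info.low_limit switching from the fixed TASK_UNMAPPED_BASE to mm->mmap_base, which already carries the delta_mmap adjustment, and the bottom-up fallback adding delta_mmap explicitly. De-sugared:

/* Equivalent reading of the braceless #ifdef'd if (helper name
 * invented for illustration): */
static unsigned long pick_hint(int randmmap_active, unsigned long hint)
{
	/* non-fixed hints are deliberately dropped under RANDMMAP */
	return randmmap_active ? 0UL : hint;
}
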
7453diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
7454index be56a24..443328f 100644
7455--- a/arch/sparc/include/asm/atomic_64.h
7456+++ b/arch/sparc/include/asm/atomic_64.h
7457@@ -14,18 +14,40 @@
7458 #define ATOMIC64_INIT(i) { (i) }
7459
7460 #define atomic_read(v) (*(volatile int *)&(v)->counter)
7461+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7462+{
7463+ return v->counter;
7464+}
7465 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
7466+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7467+{
7468+ return v->counter;
7469+}
7470
7471 #define atomic_set(v, i) (((v)->counter) = i)
7472+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7473+{
7474+ v->counter = i;
7475+}
7476 #define atomic64_set(v, i) (((v)->counter) = i)
7477+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7478+{
7479+ v->counter = i;
7480+}
7481
7482 extern void atomic_add(int, atomic_t *);
7483+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
7484 extern void atomic64_add(long, atomic64_t *);
7485+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
7486 extern void atomic_sub(int, atomic_t *);
7487+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
7488 extern void atomic64_sub(long, atomic64_t *);
7489+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
7490
7491 extern int atomic_add_ret(int, atomic_t *);
7492+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
7493 extern long atomic64_add_ret(long, atomic64_t *);
7494+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
7495 extern int atomic_sub_ret(int, atomic_t *);
7496 extern long atomic64_sub_ret(long, atomic64_t *);
7497
7498@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7499 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
7500
7501 #define atomic_inc_return(v) atomic_add_ret(1, v)
7502+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7503+{
7504+ return atomic_add_ret_unchecked(1, v);
7505+}
7506 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
7507+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7508+{
7509+ return atomic64_add_ret_unchecked(1, v);
7510+}
7511
7512 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
7513 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
7514
7515 #define atomic_add_return(i, v) atomic_add_ret(i, v)
7516+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7517+{
7518+ return atomic_add_ret_unchecked(i, v);
7519+}
7520 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
7521+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7522+{
7523+ return atomic64_add_ret_unchecked(i, v);
7524+}
7525
7526 /*
7527 * atomic_inc_and_test - increment and test
7528@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7529 * other cases.
7530 */
7531 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7532+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7533+{
7534+ return atomic_inc_return_unchecked(v) == 0;
7535+}
7536 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
7537
7538 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
7539@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7540 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
7541
7542 #define atomic_inc(v) atomic_add(1, v)
7543+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7544+{
7545+ atomic_add_unchecked(1, v);
7546+}
7547 #define atomic64_inc(v) atomic64_add(1, v)
7548+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7549+{
7550+ atomic64_add_unchecked(1, v);
7551+}
7552
7553 #define atomic_dec(v) atomic_sub(1, v)
7554+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7555+{
7556+ atomic_sub_unchecked(1, v);
7557+}
7558 #define atomic64_dec(v) atomic64_sub(1, v)
7559+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7560+{
7561+ atomic64_sub_unchecked(1, v);
7562+}
7563
7564 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
7565 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
7566
7567 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7568+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7569+{
7570+ return cmpxchg(&v->counter, old, new);
7571+}
7572 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7573+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7574+{
7575+ return xchg(&v->counter, new);
7576+}
7577
7578 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7579 {
7580- int c, old;
7581+ int c, old, new;
7582 c = atomic_read(v);
7583 for (;;) {
7584- if (unlikely(c == (u)))
7585+ if (unlikely(c == u))
7586 break;
7587- old = atomic_cmpxchg((v), c, c + (a));
7588+
7589+ asm volatile("addcc %2, %0, %0\n"
7590+
7591+#ifdef CONFIG_PAX_REFCOUNT
7592+ "tvs %%icc, 6\n"
7593+#endif
7594+
7595+ : "=r" (new)
7596+ : "0" (c), "ir" (a)
7597+ : "cc");
7598+
7599+ old = atomic_cmpxchg(v, c, new);
7600 if (likely(old == c))
7601 break;
7602 c = old;
7603@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7604 #define atomic64_cmpxchg(v, o, n) \
7605 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
7606 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
7607+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7608+{
7609+ return xchg(&v->counter, new);
7610+}
7611
7612 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7613 {
7614- long c, old;
7615+ long c, old, new;
7616 c = atomic64_read(v);
7617 for (;;) {
7618- if (unlikely(c == (u)))
7619+ if (unlikely(c == u))
7620 break;
7621- old = atomic64_cmpxchg((v), c, c + (a));
7622+
7623+ asm volatile("addcc %2, %0, %0\n"
7624+
7625+#ifdef CONFIG_PAX_REFCOUNT
7626+ "tvs %%xcc, 6\n"
7627+#endif
7628+
7629+ : "=r" (new)
7630+ : "0" (c), "ir" (a)
7631+ : "cc");
7632+
7633+ old = atomic64_cmpxchg(v, c, new);
7634 if (likely(old == c))
7635 break;
7636 c = old;
7637 }
7638- return c != (u);
7639+ return c != u;
7640 }
7641
7642 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
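
This is the heart of PAX_REFCOUNT on sparc64: the plain add in the add-unless loops becomes addcc, so the arithmetic sets the integer condition codes, and under CONFIG_PAX_REFCOUNT a tvs (trap on overflow set) follows, so a signed wrap of a reference count traps into the kernel (software trap 6) instead of silently producing a negative count that could later enable a use-after-free. The *_unchecked declarations above provide the escape hatch for counters that may legitimately wrap. A portable model of the semantics:

#include <limits.h>

/* Portable model of addcc + tvs: detect signed overflow on the
 * increment instead of wrapping.  The real code traps to a kernel
 * handler; this model just refuses. */
static int checked_inc(int *counter)
{
	if (*counter == INT_MAX)
		return -1;	/* would overflow: the kernel version traps */
	(*counter)++;
	return 0;
}
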
7643diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
7644index 5bb6991..5c2132e 100644
7645--- a/arch/sparc/include/asm/cache.h
7646+++ b/arch/sparc/include/asm/cache.h
7647@@ -7,10 +7,12 @@
7648 #ifndef _SPARC_CACHE_H
7649 #define _SPARC_CACHE_H
7650
7651+#include <linux/const.h>
7652+
7653 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
7654
7655 #define L1_CACHE_SHIFT 5
7656-#define L1_CACHE_BYTES 32
7657+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7658
7659 #ifdef CONFIG_SPARC32
7660 #define SMP_CACHE_BYTES_SHIFT 5
7661diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
7662index ac74a2c..a9e58af 100644
7663--- a/arch/sparc/include/asm/elf_32.h
7664+++ b/arch/sparc/include/asm/elf_32.h
7665@@ -114,6 +114,13 @@ typedef struct {
7666
7667 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
7668
7669+#ifdef CONFIG_PAX_ASLR
7670+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7671+
7672+#define PAX_DELTA_MMAP_LEN 16
7673+#define PAX_DELTA_STACK_LEN 16
7674+#endif
7675+
7676 /* This yields a mask that user programs can use to figure out what
7677 instruction set this cpu supports. This can NOT be done in userspace
7678 on Sparc. */
7679diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
7680index 370ca1e..d4f4a98 100644
7681--- a/arch/sparc/include/asm/elf_64.h
7682+++ b/arch/sparc/include/asm/elf_64.h
7683@@ -189,6 +189,13 @@ typedef struct {
7684 #define ELF_ET_DYN_BASE 0x0000010000000000UL
7685 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
7686
7687+#ifdef CONFIG_PAX_ASLR
7688+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
7689+
7690+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
7691+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
7692+#endif
7693+
7694 extern unsigned long sparc64_elf_hwcap;
7695 #define ELF_HWCAP sparc64_elf_hwcap
7696
7697diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
7698index 9b1c36d..209298b 100644
7699--- a/arch/sparc/include/asm/pgalloc_32.h
7700+++ b/arch/sparc/include/asm/pgalloc_32.h
7701@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
7702 }
7703
7704 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
7705+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
7706
7707 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
7708 unsigned long address)
7709diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
7710index bcfe063..b333142 100644
7711--- a/arch/sparc/include/asm/pgalloc_64.h
7712+++ b/arch/sparc/include/asm/pgalloc_64.h
7713@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
7714 }
7715
7716 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
7717+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
7718
7719 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
7720 {
7721diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
7722index 6fc1348..390c50a 100644
7723--- a/arch/sparc/include/asm/pgtable_32.h
7724+++ b/arch/sparc/include/asm/pgtable_32.h
7725@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
7726 #define PAGE_SHARED SRMMU_PAGE_SHARED
7727 #define PAGE_COPY SRMMU_PAGE_COPY
7728 #define PAGE_READONLY SRMMU_PAGE_RDONLY
7729+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
7730+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
7731+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
7732 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
7733
7734 /* Top-level page directory - dummy used by init-mm.
7735@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
7736
7737 /* xwr */
7738 #define __P000 PAGE_NONE
7739-#define __P001 PAGE_READONLY
7740-#define __P010 PAGE_COPY
7741-#define __P011 PAGE_COPY
7742+#define __P001 PAGE_READONLY_NOEXEC
7743+#define __P010 PAGE_COPY_NOEXEC
7744+#define __P011 PAGE_COPY_NOEXEC
7745 #define __P100 PAGE_READONLY
7746 #define __P101 PAGE_READONLY
7747 #define __P110 PAGE_COPY
7748 #define __P111 PAGE_COPY
7749
7750 #define __S000 PAGE_NONE
7751-#define __S001 PAGE_READONLY
7752-#define __S010 PAGE_SHARED
7753-#define __S011 PAGE_SHARED
7754+#define __S001 PAGE_READONLY_NOEXEC
7755+#define __S010 PAGE_SHARED_NOEXEC
7756+#define __S011 PAGE_SHARED_NOEXEC
7757 #define __S100 PAGE_READONLY
7758 #define __S101 PAGE_READONLY
7759 #define __S110 PAGE_SHARED
7760diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
7761index 79da178..c2eede8 100644
7762--- a/arch/sparc/include/asm/pgtsrmmu.h
7763+++ b/arch/sparc/include/asm/pgtsrmmu.h
7764@@ -115,6 +115,11 @@
7765 SRMMU_EXEC | SRMMU_REF)
7766 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
7767 SRMMU_EXEC | SRMMU_REF)
7768+
7769+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
7770+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
7771+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
7772+
7773 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
7774 SRMMU_DIRTY | SRMMU_REF)
7775
7776diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
7777index 9689176..63c18ea 100644
7778--- a/arch/sparc/include/asm/spinlock_64.h
7779+++ b/arch/sparc/include/asm/spinlock_64.h
7780@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
7781
7782 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
7783
7784-static void inline arch_read_lock(arch_rwlock_t *lock)
7785+static inline void arch_read_lock(arch_rwlock_t *lock)
7786 {
7787 unsigned long tmp1, tmp2;
7788
7789 __asm__ __volatile__ (
7790 "1: ldsw [%2], %0\n"
7791 " brlz,pn %0, 2f\n"
7792-"4: add %0, 1, %1\n"
7793+"4: addcc %0, 1, %1\n"
7794+
7795+#ifdef CONFIG_PAX_REFCOUNT
7796+" tvs %%icc, 6\n"
7797+#endif
7798+
7799 " cas [%2], %0, %1\n"
7800 " cmp %0, %1\n"
7801 " bne,pn %%icc, 1b\n"
7802@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
7803 " .previous"
7804 : "=&r" (tmp1), "=&r" (tmp2)
7805 : "r" (lock)
7806- : "memory");
7807+ : "memory", "cc");
7808 }
7809
7810-static int inline arch_read_trylock(arch_rwlock_t *lock)
7811+static inline int arch_read_trylock(arch_rwlock_t *lock)
7812 {
7813 int tmp1, tmp2;
7814
7815@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
7816 "1: ldsw [%2], %0\n"
7817 " brlz,a,pn %0, 2f\n"
7818 " mov 0, %0\n"
7819-" add %0, 1, %1\n"
7820+" addcc %0, 1, %1\n"
7821+
7822+#ifdef CONFIG_PAX_REFCOUNT
7823+" tvs %%icc, 6\n"
7824+#endif
7825+
7826 " cas [%2], %0, %1\n"
7827 " cmp %0, %1\n"
7828 " bne,pn %%icc, 1b\n"
7829@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
7830 return tmp1;
7831 }
7832
7833-static void inline arch_read_unlock(arch_rwlock_t *lock)
7834+static inline void arch_read_unlock(arch_rwlock_t *lock)
7835 {
7836 unsigned long tmp1, tmp2;
7837
7838 __asm__ __volatile__(
7839 "1: lduw [%2], %0\n"
7840-" sub %0, 1, %1\n"
7841+" subcc %0, 1, %1\n"
7842+
7843+#ifdef CONFIG_PAX_REFCOUNT
7844+" tvs %%icc, 6\n"
7845+#endif
7846+
7847 " cas [%2], %0, %1\n"
7848 " cmp %0, %1\n"
7849 " bne,pn %%xcc, 1b\n"
7850@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
7851 : "memory");
7852 }
7853
7854-static void inline arch_write_lock(arch_rwlock_t *lock)
7855+static inline void arch_write_lock(arch_rwlock_t *lock)
7856 {
7857 unsigned long mask, tmp1, tmp2;
7858
7859@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
7860 : "memory");
7861 }
7862
7863-static void inline arch_write_unlock(arch_rwlock_t *lock)
7864+static inline void arch_write_unlock(arch_rwlock_t *lock)
7865 {
7866 __asm__ __volatile__(
7867 " stw %%g0, [%0]"
7868@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
7869 : "memory");
7870 }
7871
7872-static int inline arch_write_trylock(arch_rwlock_t *lock)
7873+static inline int arch_write_trylock(arch_rwlock_t *lock)
7874 {
7875 unsigned long mask, tmp1, tmp2, result;
7876
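
Beyond applying the refcount trapping to the rwlock reader counts (add/sub become addcc/subcc plus tvs), two incidental fixes ride along here: the ungrammatical "static void inline" orderings become "static inline void", and arch_read_lock()'s constraint list gains a "cc" clobber, which is mandatory once the asm writes the condition codes. The clobber rule in miniature:

/* Once an asm uses addcc/subcc it writes %icc/%xcc, so "cc" must be
 * clobbered or the compiler may assume the flags survive the asm.
 * Illustrative sparc snippet, not taken from the patch: */
static inline int inc_with_cc(int in)
{
	int out;
	__asm__ __volatile__("addcc %1, 1, %0"
			     : "=r" (out) : "r" (in) : "cc");
	return out;
}
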
7877diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
7878index 25849ae..924c54b 100644
7879--- a/arch/sparc/include/asm/thread_info_32.h
7880+++ b/arch/sparc/include/asm/thread_info_32.h
7881@@ -49,6 +49,8 @@ struct thread_info {
7882 unsigned long w_saved;
7883
7884 struct restart_block restart_block;
7885+
7886+ unsigned long lowest_stack;
7887 };
7888
7889 /*
7890diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
7891index 269bd92..e46a9b8 100644
7892--- a/arch/sparc/include/asm/thread_info_64.h
7893+++ b/arch/sparc/include/asm/thread_info_64.h
7894@@ -63,6 +63,8 @@ struct thread_info {
7895 struct pt_regs *kern_una_regs;
7896 unsigned int kern_una_insn;
7897
7898+ unsigned long lowest_stack;
7899+
7900 unsigned long fpregs[0] __attribute__ ((aligned(64)));
7901 };
7902
7903@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
7904 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
7905 /* flag bit 6 is available */
7906 #define TIF_32BIT 7 /* 32-bit binary */
7907-/* flag bit 8 is available */
7908+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
7909 #define TIF_SECCOMP 9 /* secure computing */
7910 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
7911 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
7912+
7913 /* NOTE: Thread flags >= 12 should be ones we have no interest
7914 * in using in assembly, else we can't use the mask as
7915 * an immediate value in instructions such as andcc.
7916@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
7917 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
7918 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
7919 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
7920+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
7921
7922 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
7923 _TIF_DO_NOTIFY_RESUME_MASK | \
7924 _TIF_NEED_RESCHED)
7925 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
7926
7927+#define _TIF_WORK_SYSCALL \
7928+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
7929+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
7930+
7931+
7932 /*
7933 * Thread-synchronous status.
7934 *
7935diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
7936index 0167d26..9acd8ed 100644
7937--- a/arch/sparc/include/asm/uaccess.h
7938+++ b/arch/sparc/include/asm/uaccess.h
7939@@ -1,5 +1,13 @@
7940 #ifndef ___ASM_SPARC_UACCESS_H
7941 #define ___ASM_SPARC_UACCESS_H
7942+
7943+#ifdef __KERNEL__
7944+#ifndef __ASSEMBLY__
7945+#include <linux/types.h>
7946+extern void check_object_size(const void *ptr, unsigned long n, bool to);
7947+#endif
7948+#endif
7949+
7950 #if defined(__sparc__) && defined(__arch64__)
7951 #include <asm/uaccess_64.h>
7952 #else
7953diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
7954index 53a28dd..50c38c3 100644
7955--- a/arch/sparc/include/asm/uaccess_32.h
7956+++ b/arch/sparc/include/asm/uaccess_32.h
7957@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
7958
7959 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
7960 {
7961- if (n && __access_ok((unsigned long) to, n))
7962+ if ((long)n < 0)
7963+ return n;
7964+
7965+ if (n && __access_ok((unsigned long) to, n)) {
7966+ if (!__builtin_constant_p(n))
7967+ check_object_size(from, n, true);
7968 return __copy_user(to, (__force void __user *) from, n);
7969- else
7970+ } else
7971 return n;
7972 }
7973
7974 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
7975 {
7976+ if ((long)n < 0)
7977+ return n;
7978+
7979+ if (!__builtin_constant_p(n))
7980+ check_object_size(from, n, true);
7981+
7982 return __copy_user(to, (__force void __user *) from, n);
7983 }
7984
7985 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
7986 {
7987- if (n && __access_ok((unsigned long) from, n))
7988+ if ((long)n < 0)
7989+ return n;
7990+
7991+ if (n && __access_ok((unsigned long) from, n)) {
7992+ if (!__builtin_constant_p(n))
7993+ check_object_size(to, n, false);
7994 return __copy_user((__force void __user *) to, from, n);
7995- else
7996+ } else
7997 return n;
7998 }
7999
8000 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
8001 {
8002+ if ((long)n < 0)
8003+ return n;
8004+
8005 return __copy_user((__force void __user *) to, from, n);
8006 }
8007
8008diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
8009index e562d3c..191f176 100644
8010--- a/arch/sparc/include/asm/uaccess_64.h
8011+++ b/arch/sparc/include/asm/uaccess_64.h
8012@@ -10,6 +10,7 @@
8013 #include <linux/compiler.h>
8014 #include <linux/string.h>
8015 #include <linux/thread_info.h>
8016+#include <linux/kernel.h>
8017 #include <asm/asi.h>
8018 #include <asm/spitfire.h>
8019 #include <asm-generic/uaccess-unaligned.h>
8020@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
8021 static inline unsigned long __must_check
8022 copy_from_user(void *to, const void __user *from, unsigned long size)
8023 {
8024- unsigned long ret = ___copy_from_user(to, from, size);
8025+ unsigned long ret;
8026
8027+ if ((long)size < 0 || size > INT_MAX)
8028+ return size;
8029+
8030+ if (!__builtin_constant_p(size))
8031+ check_object_size(to, size, false);
8032+
8033+ ret = ___copy_from_user(to, from, size);
8034 if (unlikely(ret))
8035 ret = copy_from_user_fixup(to, from, size);
8036
8037@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
8038 static inline unsigned long __must_check
8039 copy_to_user(void __user *to, const void *from, unsigned long size)
8040 {
8041- unsigned long ret = ___copy_to_user(to, from, size);
8042+ unsigned long ret;
8043
8044+ if ((long)size < 0 || size > INT_MAX)
8045+ return size;
8046+
8047+ if (!__builtin_constant_p(size))
8048+ check_object_size(from, size, true);
8049+
8050+ ret = ___copy_to_user(to, from, size);
8051 if (unlikely(ret))
8052 ret = copy_to_user_fixup(to, from, size);
8053 return ret;
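
The sparc usercopy hunks combine the defenses seen separately above: the sign-bit length guard from the s390 changes (the 64-bit variants additionally cap size at INT_MAX, rejecting anything that cannot be a sane single copy; the new linux/kernel.h include supplies that constant), plus a call to check_object_size(), the PAX_USERCOPY entry point declared in the asm/uaccess.h hunk above, which verifies at runtime that the kernel side of the copy stays inside a single heap object or stack frame. The __builtin_constant_p() test skips the runtime check for compile-time-constant lengths, which can be validated statically. Wiring sketch:

/* Sketch of the hardened-usercopy gate as wired here; the
 * check_object_size() body lives in the grsecurity/PaX core and is
 * not part of this excerpt. */
extern void check_object_size(const void *ptr, unsigned long n, bool to);

static inline int usercopy_len_ok(unsigned long n)
{
	return !((long)n < 0 || n > INT_MAX);
}

static inline void usercopy_check(const void *kptr, unsigned long n,
				  bool to_user)
{
	if (!__builtin_constant_p(n))
		check_object_size(kptr, n, to_user);
}
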
8054diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
8055index 6cf591b..b49e65a 100644
8056--- a/arch/sparc/kernel/Makefile
8057+++ b/arch/sparc/kernel/Makefile
8058@@ -3,7 +3,7 @@
8059 #
8060
8061 asflags-y := -ansi
8062-ccflags-y := -Werror
8063+#ccflags-y := -Werror
8064
8065 extra-y := head_$(BITS).o
8066
8067diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
8068index be8e862..5b50b12 100644
8069--- a/arch/sparc/kernel/process_32.c
8070+++ b/arch/sparc/kernel/process_32.c
8071@@ -126,14 +126,14 @@ void show_regs(struct pt_regs *r)
8072
8073 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
8074 r->psr, r->pc, r->npc, r->y, print_tainted());
8075- printk("PC: <%pS>\n", (void *) r->pc);
8076+ printk("PC: <%pA>\n", (void *) r->pc);
8077 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8078 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
8079 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
8080 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8081 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
8082 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
8083- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
8084+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
8085
8086 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8087 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
8088@@ -168,7 +168,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8089 rw = (struct reg_window32 *) fp;
8090 pc = rw->ins[7];
8091 printk("[%08lx : ", pc);
8092- printk("%pS ] ", (void *) pc);
8093+ printk("%pA ] ", (void *) pc);
8094 fp = rw->ins[6];
8095 } while (++count < 16);
8096 printk("\n");
8097diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
8098index cdb80b2..5ca141d 100644
8099--- a/arch/sparc/kernel/process_64.c
8100+++ b/arch/sparc/kernel/process_64.c
8101@@ -181,14 +181,14 @@ static void show_regwindow(struct pt_regs *regs)
8102 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
8103 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
8104 if (regs->tstate & TSTATE_PRIV)
8105- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
8106+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
8107 }
8108
8109 void show_regs(struct pt_regs *regs)
8110 {
8111 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
8112 regs->tpc, regs->tnpc, regs->y, print_tainted());
8113- printk("TPC: <%pS>\n", (void *) regs->tpc);
8114+ printk("TPC: <%pA>\n", (void *) regs->tpc);
8115 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
8116 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
8117 regs->u_regs[3]);
8118@@ -201,7 +201,7 @@ void show_regs(struct pt_regs *regs)
8119 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
8120 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
8121 regs->u_regs[15]);
8122- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
8123+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
8124 show_regwindow(regs);
8125 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
8126 }
8127@@ -290,7 +290,7 @@ void arch_trigger_all_cpu_backtrace(void)
8128 ((tp && tp->task) ? tp->task->pid : -1));
8129
8130 if (gp->tstate & TSTATE_PRIV) {
8131- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
8132+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
8133 (void *) gp->tpc,
8134 (void *) gp->o7,
8135 (void *) gp->i7,
8136diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
8137index 7ff45e4..a58f271 100644
8138--- a/arch/sparc/kernel/ptrace_64.c
8139+++ b/arch/sparc/kernel/ptrace_64.c
8140@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
8141 return ret;
8142 }
8143
8144+#ifdef CONFIG_GRKERNSEC_SETXID
8145+extern void gr_delayed_cred_worker(void);
8146+#endif
8147+
8148 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8149 {
8150 int ret = 0;
8151@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8152 /* do the secure computing check first */
8153 secure_computing_strict(regs->u_regs[UREG_G1]);
8154
8155+#ifdef CONFIG_GRKERNSEC_SETXID
8156+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8157+ gr_delayed_cred_worker();
8158+#endif
8159+
8160 if (test_thread_flag(TIF_SYSCALL_TRACE))
8161 ret = tracehook_report_syscall_entry(regs);
8162
8163@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8164
8165 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
8166 {
8167+#ifdef CONFIG_GRKERNSEC_SETXID
8168+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8169+ gr_delayed_cred_worker();
8170+#endif
8171+
8172 audit_syscall_exit(regs);
8173
8174 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
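The two hooks added to syscall_trace_enter()/syscall_trace_leave() above implement a deferral pattern: a credential change decided elsewhere raises TIF_GRSEC_SETXID on the thread, and the flag is consumed with a test-and-clear at the next syscall boundary so the change is applied in a well-defined context. A compilable sketch of the pattern, with stand-in names (pending_cred_update, delayed_cred_worker) rather than grsecurity internals:

#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for the per-thread TIF_GRSEC_SETXID flag. */
static atomic_int pending_cred_update;

/* Stand-in for gr_delayed_cred_worker(): apply the queued change. */
static void delayed_cred_worker(void)
{
        puts("applying deferred credential update");
}

/* Called at syscall entry and exit, mirroring the hunks above; the
 * exchange is a test-and-clear, so the worker runs once per request. */
static void syscall_trace_hook(void)
{
        if (atomic_exchange(&pending_cred_update, 0))
                delayed_cred_worker();
}

int main(void)
{
        atomic_store(&pending_cred_update, 1);
        syscall_trace_hook();   /* runs the worker */
        syscall_trace_hook();   /* flag already cleared: no-op */
        return 0;
}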
8175diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
8176index 2da0bdc..79128d2 100644
8177--- a/arch/sparc/kernel/sys_sparc_32.c
8178+++ b/arch/sparc/kernel/sys_sparc_32.c
8179@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8180 if (len > TASK_SIZE - PAGE_SIZE)
8181 return -ENOMEM;
8182 if (!addr)
8183- addr = TASK_UNMAPPED_BASE;
8184+ addr = current->mm->mmap_base;
8185
8186 info.flags = 0;
8187 info.length = len;
8188diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
8189index 708bc29..f0129cb 100644
8190--- a/arch/sparc/kernel/sys_sparc_64.c
8191+++ b/arch/sparc/kernel/sys_sparc_64.c
8192@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8193 struct vm_area_struct * vma;
8194 unsigned long task_size = TASK_SIZE;
8195 int do_color_align;
8196+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8197 struct vm_unmapped_area_info info;
8198
8199 if (flags & MAP_FIXED) {
8200 /* We do not accept a shared mapping if it would violate
8201 * cache aliasing constraints.
8202 */
8203- if ((flags & MAP_SHARED) &&
8204+ if ((filp || (flags & MAP_SHARED)) &&
8205 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8206 return -EINVAL;
8207 return addr;
8208@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8209 if (filp || (flags & MAP_SHARED))
8210 do_color_align = 1;
8211
8212+#ifdef CONFIG_PAX_RANDMMAP
8213+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8214+#endif
8215+
8216 if (addr) {
8217 if (do_color_align)
8218 addr = COLOR_ALIGN(addr, pgoff);
8219@@ -118,14 +123,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8220 addr = PAGE_ALIGN(addr);
8221
8222 vma = find_vma(mm, addr);
8223- if (task_size - len >= addr &&
8224- (!vma || addr + len <= vma->vm_start))
8225+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8226 return addr;
8227 }
8228
8229 info.flags = 0;
8230 info.length = len;
8231- info.low_limit = TASK_UNMAPPED_BASE;
8232+ info.low_limit = mm->mmap_base;
8233 info.high_limit = min(task_size, VA_EXCLUDE_START);
8234 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8235 info.align_offset = pgoff << PAGE_SHIFT;
8236@@ -134,6 +138,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8237 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
8238 VM_BUG_ON(addr != -ENOMEM);
8239 info.low_limit = VA_EXCLUDE_END;
8240+
8241+#ifdef CONFIG_PAX_RANDMMAP
8242+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8243+ info.low_limit += mm->delta_mmap;
8244+#endif
8245+
8246 info.high_limit = task_size;
8247 addr = vm_unmapped_area(&info);
8248 }
8249@@ -151,6 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8250 unsigned long task_size = STACK_TOP32;
8251 unsigned long addr = addr0;
8252 int do_color_align;
8253+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8254 struct vm_unmapped_area_info info;
8255
8256 /* This should only ever run for 32-bit processes. */
8257@@ -160,7 +171,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8258 /* We do not accept a shared mapping if it would violate
8259 * cache aliasing constraints.
8260 */
8261- if ((flags & MAP_SHARED) &&
8262+ if ((filp || (flags & MAP_SHARED)) &&
8263 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8264 return -EINVAL;
8265 return addr;
8266@@ -173,6 +184,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8267 if (filp || (flags & MAP_SHARED))
8268 do_color_align = 1;
8269
8270+#ifdef CONFIG_PAX_RANDMMAP
8271+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8272+#endif
8273+
8274 /* requesting a specific address */
8275 if (addr) {
8276 if (do_color_align)
8277@@ -181,8 +196,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8278 addr = PAGE_ALIGN(addr);
8279
8280 vma = find_vma(mm, addr);
8281- if (task_size - len >= addr &&
8282- (!vma || addr + len <= vma->vm_start))
8283+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8284 return addr;
8285 }
8286
8287@@ -204,6 +218,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8288 VM_BUG_ON(addr != -ENOMEM);
8289 info.flags = 0;
8290 info.low_limit = TASK_UNMAPPED_BASE;
8291+
8292+#ifdef CONFIG_PAX_RANDMMAP
8293+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8294+ info.low_limit += mm->delta_mmap;
8295+#endif
8296+
8297 info.high_limit = STACK_TOP32;
8298 addr = vm_unmapped_area(&info);
8299 }
8300@@ -264,6 +284,10 @@ static unsigned long mmap_rnd(void)
8301 {
8302 unsigned long rnd = 0UL;
8303
8304+#ifdef CONFIG_PAX_RANDMMAP
8305+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8306+#endif
8307+
8308 if (current->flags & PF_RANDOMIZE) {
8309 unsigned long val = get_random_int();
8310 if (test_thread_flag(TIF_32BIT))
8311@@ -289,6 +313,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8312 gap == RLIM_INFINITY ||
8313 sysctl_legacy_va_layout) {
8314 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
8315+
8316+#ifdef CONFIG_PAX_RANDMMAP
8317+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8318+ mm->mmap_base += mm->delta_mmap;
8319+#endif
8320+
8321 mm->get_unmapped_area = arch_get_unmapped_area;
8322 mm->unmap_area = arch_unmap_area;
8323 } else {
8324@@ -301,6 +331,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8325 gap = (task_size / 6 * 5);
8326
8327 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
8328+
8329+#ifdef CONFIG_PAX_RANDMMAP
8330+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8331+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8332+#endif
8333+
8334 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8335 mm->unmap_area = arch_unmap_area_topdown;
8336 }
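Two related changes run through sys_sparc_64.c: a requested address is now validated with check_heap_stack_gap() instead of the open-coded "!vma || addr + len <= vma->vm_start" test, and under MF_PAX_RANDMMAP the search base gains a per-mm random delta (mm->delta_mmap, mm->delta_stack). A simplified, illustrative version of the gap check follows; the real helper also consumes the per-mapping offset from gr_rand_threadstack_offset() and handles arithmetic edge cases this sketch ignores:

#include <stdbool.h>

struct vma_stub {
        unsigned long vm_start;         /* first byte of the next mapping */
        bool grows_down;                /* stack-like vma? */
};

/* Illustrative only: true when [addr, addr+len) fits below the next
 * vma, keeping an extra guard gap under a growing-down stack. The
 * kernel helper's overflow/underflow handling is omitted here. */
static bool gap_ok(const struct vma_stub *vma, unsigned long addr,
                   unsigned long len, unsigned long guard)
{
        unsigned long limit;

        if (!vma)
                return true;            /* nothing above: always fits */
        limit = vma->vm_start - (vma->grows_down ? guard : 0);
        return addr + len <= limit;
}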
8337diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
8338index e0fed77..604a7e5 100644
8339--- a/arch/sparc/kernel/syscalls.S
8340+++ b/arch/sparc/kernel/syscalls.S
8341@@ -58,7 +58,7 @@ sys32_rt_sigreturn:
8342 #endif
8343 .align 32
8344 1: ldx [%g6 + TI_FLAGS], %l5
8345- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8346+ andcc %l5, _TIF_WORK_SYSCALL, %g0
8347 be,pt %icc, rtrap
8348 nop
8349 call syscall_trace_leave
8350@@ -190,7 +190,7 @@ linux_sparc_syscall32:
8351
8352 srl %i5, 0, %o5 ! IEU1
8353 srl %i2, 0, %o2 ! IEU0 Group
8354- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8355+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8356 bne,pn %icc, linux_syscall_trace32 ! CTI
8357 mov %i0, %l5 ! IEU1
8358 call %l7 ! CTI Group brk forced
8359@@ -213,7 +213,7 @@ linux_sparc_syscall:
8360
8361 mov %i3, %o3 ! IEU1
8362 mov %i4, %o4 ! IEU0 Group
8363- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8364+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8365 bne,pn %icc, linux_syscall_trace ! CTI Group
8366 mov %i0, %l5 ! IEU0
8367 2: call %l7 ! CTI Group brk forced
8368@@ -229,7 +229,7 @@ ret_sys_call:
8369
8370 cmp %o0, -ERESTART_RESTARTBLOCK
8371 bgeu,pn %xcc, 1f
8372- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8373+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8374 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
8375
8376 2:
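The syscalls.S hunks fold the repeated four-flag OR into a single _TIF_WORK_SYSCALL mask, so a newly added work flag (here, presumably, TIF_GRSEC_SETXID, which the ptrace hooks above test in C) is picked up by every syscall-path check at once. The shape of such a definition, with illustrative bit positions rather than sparc's real thread_info layout:

/* Illustrative bit numbers only. */
#define TIF_SYSCALL_TRACE       0
#define TIF_SECCOMP             1
#define TIF_SYSCALL_AUDIT       2
#define TIF_SYSCALL_TRACEPOINT  3
#define TIF_GRSEC_SETXID        4

#define _TIF_SYSCALL_TRACE      (1 << TIF_SYSCALL_TRACE)
#define _TIF_SECCOMP            (1 << TIF_SECCOMP)
#define _TIF_SYSCALL_AUDIT      (1 << TIF_SYSCALL_AUDIT)
#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
#define _TIF_GRSEC_SETXID       (1 << TIF_GRSEC_SETXID)

/* One mask answering "anything to do around this syscall?", tested by
 * the assembly as: andcc %l0, _TIF_WORK_SYSCALL, %g0 */
#define _TIF_WORK_SYSCALL       (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | \
                                 _TIF_SYSCALL_AUDIT | _TIF_SYSCALL_TRACEPOINT | \
                                 _TIF_GRSEC_SETXID)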
8377diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
8378index 654e8aa..45f431b 100644
8379--- a/arch/sparc/kernel/sysfs.c
8380+++ b/arch/sparc/kernel/sysfs.c
8381@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
8382 return NOTIFY_OK;
8383 }
8384
8385-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
8386+static struct notifier_block sysfs_cpu_nb = {
8387 .notifier_call = sysfs_cpu_notify,
8388 };
8389
8390diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
8391index a5785ea..405c5f7 100644
8392--- a/arch/sparc/kernel/traps_32.c
8393+++ b/arch/sparc/kernel/traps_32.c
8394@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
8395 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
8396 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
8397
8398+extern void gr_handle_kernel_exploit(void);
8399+
8400 void die_if_kernel(char *str, struct pt_regs *regs)
8401 {
8402 static int die_counter;
8403@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8404 count++ < 30 &&
8405 (((unsigned long) rw) >= PAGE_OFFSET) &&
8406 !(((unsigned long) rw) & 0x7)) {
8407- printk("Caller[%08lx]: %pS\n", rw->ins[7],
8408+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
8409 (void *) rw->ins[7]);
8410 rw = (struct reg_window32 *)rw->ins[6];
8411 }
8412 }
8413 printk("Instruction DUMP:");
8414 instruction_dump ((unsigned long *) regs->pc);
8415- if(regs->psr & PSR_PS)
8416+ if(regs->psr & PSR_PS) {
8417+ gr_handle_kernel_exploit();
8418 do_exit(SIGKILL);
8419+ }
8420 do_exit(SIGSEGV);
8421 }
8422
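die_if_kernel() now calls gr_handle_kernel_exploit() before exiting when the trap happened in privileged (PSR_PS) context, treating a kernel-mode oops as a possible exploit attempt rather than a mere crash. The hunk does not show what the handler does; as a loosely hedged sketch of the general idea only, one plausible response is to flag the offending uid:

#include <stdio.h>

/* Illustrative state only: remember one offending uid. The real policy
 * lives behind gr_handle_kernel_exploit() and is not shown here. */
static long banned_uid = -1;

static void handle_kernel_exploit(long uid)
{
        banned_uid = uid;
        fprintf(stderr, "kernel bug in privileged context, uid %ld flagged\n", uid);
}

/* A process-creation hook could then refuse work for the flagged uid. */
static int may_spawn(long uid)
{
        return uid != banned_uid;
}

int main(void)
{
        handle_kernel_exploit(1000);
        printf("uid 1000 may spawn: %d\n", may_spawn(1000));
        return 0;
}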
8423diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
8424index e7ecf15..6520e65 100644
8425--- a/arch/sparc/kernel/traps_64.c
8426+++ b/arch/sparc/kernel/traps_64.c
8427@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
8428 i + 1,
8429 p->trapstack[i].tstate, p->trapstack[i].tpc,
8430 p->trapstack[i].tnpc, p->trapstack[i].tt);
8431- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
8432+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
8433 }
8434 }
8435
8436@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
8437
8438 lvl -= 0x100;
8439 if (regs->tstate & TSTATE_PRIV) {
8440+
8441+#ifdef CONFIG_PAX_REFCOUNT
8442+ if (lvl == 6)
8443+ pax_report_refcount_overflow(regs);
8444+#endif
8445+
8446 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
8447 die_if_kernel(buffer, regs);
8448 }
8449@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
8450 void bad_trap_tl1(struct pt_regs *regs, long lvl)
8451 {
8452 char buffer[32];
8453-
8454+
8455 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
8456 0, lvl, SIGTRAP) == NOTIFY_STOP)
8457 return;
8458
8459+#ifdef CONFIG_PAX_REFCOUNT
8460+ if (lvl == 6)
8461+ pax_report_refcount_overflow(regs);
8462+#endif
8463+
8464 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
8465
8466 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
8467@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
8468 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
8469 printk("%s" "ERROR(%d): ",
8470 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
8471- printk("TPC<%pS>\n", (void *) regs->tpc);
8472+ printk("TPC<%pA>\n", (void *) regs->tpc);
8473 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
8474 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
8475 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
8476@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8477 smp_processor_id(),
8478 (type & 0x1) ? 'I' : 'D',
8479 regs->tpc);
8480- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
8481+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
8482 panic("Irrecoverable Cheetah+ parity error.");
8483 }
8484
8485@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8486 smp_processor_id(),
8487 (type & 0x1) ? 'I' : 'D',
8488 regs->tpc);
8489- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
8490+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
8491 }
8492
8493 struct sun4v_error_entry {
8494@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
8495
8496 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
8497 regs->tpc, tl);
8498- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
8499+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
8500 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8501- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
8502+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
8503 (void *) regs->u_regs[UREG_I7]);
8504 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
8505 "pte[%lx] error[%lx]\n",
8506@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
8507
8508 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
8509 regs->tpc, tl);
8510- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
8511+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
8512 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8513- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
8514+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
8515 (void *) regs->u_regs[UREG_I7]);
8516 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
8517 "pte[%lx] error[%lx]\n",
8518@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8519 fp = (unsigned long)sf->fp + STACK_BIAS;
8520 }
8521
8522- printk(" [%016lx] %pS\n", pc, (void *) pc);
8523+ printk(" [%016lx] %pA\n", pc, (void *) pc);
8524 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8525 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
8526 int index = tsk->curr_ret_stack;
8527 if (tsk->ret_stack && index >= graph) {
8528 pc = tsk->ret_stack[index - graph].ret;
8529- printk(" [%016lx] %pS\n", pc, (void *) pc);
8530+ printk(" [%016lx] %pA\n", pc, (void *) pc);
8531 graph++;
8532 }
8533 }
8534@@ -2367,6 +2378,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
8535 return (struct reg_window *) (fp + STACK_BIAS);
8536 }
8537
8538+extern void gr_handle_kernel_exploit(void);
8539+
8540 void die_if_kernel(char *str, struct pt_regs *regs)
8541 {
8542 static int die_counter;
8543@@ -2395,7 +2408,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8544 while (rw &&
8545 count++ < 30 &&
8546 kstack_valid(tp, (unsigned long) rw)) {
8547- printk("Caller[%016lx]: %pS\n", rw->ins[7],
8548+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
8549 (void *) rw->ins[7]);
8550
8551 rw = kernel_stack_up(rw);
8552@@ -2408,8 +2421,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8553 }
8554 user_instruction_dump ((unsigned int __user *) regs->tpc);
8555 }
8556- if (regs->tstate & TSTATE_PRIV)
8557+ if (regs->tstate & TSTATE_PRIV) {
8558+ gr_handle_kernel_exploit();
8559 do_exit(SIGKILL);
8560+ }
8561 do_exit(SIGSEGV);
8562 }
8563 EXPORT_SYMBOL(die_if_kernel);
8564diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
8565index 8201c25e..072a2a7 100644
8566--- a/arch/sparc/kernel/unaligned_64.c
8567+++ b/arch/sparc/kernel/unaligned_64.c
8568@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
8569 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
8570
8571 if (__ratelimit(&ratelimit)) {
8572- printk("Kernel unaligned access at TPC[%lx] %pS\n",
8573+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
8574 regs->tpc, (void *) regs->tpc);
8575 }
8576 }
8577diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
8578index 8410065f2..4fd4ca22 100644
8579--- a/arch/sparc/lib/Makefile
8580+++ b/arch/sparc/lib/Makefile
8581@@ -2,7 +2,7 @@
8582 #
8583
8584 asflags-y := -ansi -DST_DIV0=0x02
8585-ccflags-y := -Werror
8586+#ccflags-y := -Werror
8587
8588 lib-$(CONFIG_SPARC32) += ashrdi3.o
8589 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
8590diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
8591index 85c233d..68500e0 100644
8592--- a/arch/sparc/lib/atomic_64.S
8593+++ b/arch/sparc/lib/atomic_64.S
8594@@ -17,7 +17,12 @@
8595 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
8596 BACKOFF_SETUP(%o2)
8597 1: lduw [%o1], %g1
8598- add %g1, %o0, %g7
8599+ addcc %g1, %o0, %g7
8600+
8601+#ifdef CONFIG_PAX_REFCOUNT
8602+ tvs %icc, 6
8603+#endif
8604+
8605 cas [%o1], %g1, %g7
8606 cmp %g1, %g7
8607 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8608@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
8609 2: BACKOFF_SPIN(%o2, %o3, 1b)
8610 ENDPROC(atomic_add)
8611
8612+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8613+ BACKOFF_SETUP(%o2)
8614+1: lduw [%o1], %g1
8615+ add %g1, %o0, %g7
8616+ cas [%o1], %g1, %g7
8617+ cmp %g1, %g7
8618+ bne,pn %icc, 2f
8619+ nop
8620+ retl
8621+ nop
8622+2: BACKOFF_SPIN(%o2, %o3, 1b)
8623+ENDPROC(atomic_add_unchecked)
8624+
8625 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8626 BACKOFF_SETUP(%o2)
8627 1: lduw [%o1], %g1
8628- sub %g1, %o0, %g7
8629+ subcc %g1, %o0, %g7
8630+
8631+#ifdef CONFIG_PAX_REFCOUNT
8632+ tvs %icc, 6
8633+#endif
8634+
8635 cas [%o1], %g1, %g7
8636 cmp %g1, %g7
8637 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8638@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8639 2: BACKOFF_SPIN(%o2, %o3, 1b)
8640 ENDPROC(atomic_sub)
8641
8642+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
8643+ BACKOFF_SETUP(%o2)
8644+1: lduw [%o1], %g1
8645+ sub %g1, %o0, %g7
8646+ cas [%o1], %g1, %g7
8647+ cmp %g1, %g7
8648+ bne,pn %icc, 2f
8649+ nop
8650+ retl
8651+ nop
8652+2: BACKOFF_SPIN(%o2, %o3, 1b)
8653+ENDPROC(atomic_sub_unchecked)
8654+
8655 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8656 BACKOFF_SETUP(%o2)
8657 1: lduw [%o1], %g1
8658- add %g1, %o0, %g7
8659+ addcc %g1, %o0, %g7
8660+
8661+#ifdef CONFIG_PAX_REFCOUNT
8662+ tvs %icc, 6
8663+#endif
8664+
8665 cas [%o1], %g1, %g7
8666 cmp %g1, %g7
8667 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8668@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8669 2: BACKOFF_SPIN(%o2, %o3, 1b)
8670 ENDPROC(atomic_add_ret)
8671
8672+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8673+ BACKOFF_SETUP(%o2)
8674+1: lduw [%o1], %g1
8675+ addcc %g1, %o0, %g7
8676+ cas [%o1], %g1, %g7
8677+ cmp %g1, %g7
8678+ bne,pn %icc, 2f
8679+ add %g7, %o0, %g7
8680+ sra %g7, 0, %o0
8681+ retl
8682+ nop
8683+2: BACKOFF_SPIN(%o2, %o3, 1b)
8684+ENDPROC(atomic_add_ret_unchecked)
8685+
8686 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
8687 BACKOFF_SETUP(%o2)
8688 1: lduw [%o1], %g1
8689- sub %g1, %o0, %g7
8690+ subcc %g1, %o0, %g7
8691+
8692+#ifdef CONFIG_PAX_REFCOUNT
8693+ tvs %icc, 6
8694+#endif
8695+
8696 cas [%o1], %g1, %g7
8697 cmp %g1, %g7
8698 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8699@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
8700 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
8701 BACKOFF_SETUP(%o2)
8702 1: ldx [%o1], %g1
8703- add %g1, %o0, %g7
8704+ addcc %g1, %o0, %g7
8705+
8706+#ifdef CONFIG_PAX_REFCOUNT
8707+ tvs %xcc, 6
8708+#endif
8709+
8710 casx [%o1], %g1, %g7
8711 cmp %g1, %g7
8712 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
8713@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
8714 2: BACKOFF_SPIN(%o2, %o3, 1b)
8715 ENDPROC(atomic64_add)
8716
8717+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8718+ BACKOFF_SETUP(%o2)
8719+1: ldx [%o1], %g1
8720+ addcc %g1, %o0, %g7
8721+ casx [%o1], %g1, %g7
8722+ cmp %g1, %g7
8723+ bne,pn %xcc, 2f
8724+ nop
8725+ retl
8726+ nop
8727+2: BACKOFF_SPIN(%o2, %o3, 1b)
8728+ENDPROC(atomic64_add_unchecked)
8729+
8730 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8731 BACKOFF_SETUP(%o2)
8732 1: ldx [%o1], %g1
8733- sub %g1, %o0, %g7
8734+ subcc %g1, %o0, %g7
8735+
8736+#ifdef CONFIG_PAX_REFCOUNT
8737+ tvs %xcc, 6
8738+#endif
8739+
8740 casx [%o1], %g1, %g7
8741 cmp %g1, %g7
8742 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
8743@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8744 2: BACKOFF_SPIN(%o2, %o3, 1b)
8745 ENDPROC(atomic64_sub)
8746
8747+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
8748+ BACKOFF_SETUP(%o2)
8749+1: ldx [%o1], %g1
8750+ subcc %g1, %o0, %g7
8751+ casx [%o1], %g1, %g7
8752+ cmp %g1, %g7
8753+ bne,pn %xcc, 2f
8754+ nop
8755+ retl
8756+ nop
8757+2: BACKOFF_SPIN(%o2, %o3, 1b)
8758+ENDPROC(atomic64_sub_unchecked)
8759+
8760 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8761 BACKOFF_SETUP(%o2)
8762 1: ldx [%o1], %g1
8763- add %g1, %o0, %g7
8764+ addcc %g1, %o0, %g7
8765+
8766+#ifdef CONFIG_PAX_REFCOUNT
8767+ tvs %xcc, 6
8768+#endif
8769+
8770 casx [%o1], %g1, %g7
8771 cmp %g1, %g7
8772 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
8773@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8774 2: BACKOFF_SPIN(%o2, %o3, 1b)
8775 ENDPROC(atomic64_add_ret)
8776
8777+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8778+ BACKOFF_SETUP(%o2)
8779+1: ldx [%o1], %g1
8780+ addcc %g1, %o0, %g7
8781+ casx [%o1], %g1, %g7
8782+ cmp %g1, %g7
8783+ bne,pn %xcc, 2f
8784+ add %g7, %o0, %g7
8785+ mov %g7, %o0
8786+ retl
8787+ nop
8788+2: BACKOFF_SPIN(%o2, %o3, 1b)
8789+ENDPROC(atomic64_add_ret_unchecked)
8790+
8791 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
8792 BACKOFF_SETUP(%o2)
8793 1: ldx [%o1], %g1
8794- sub %g1, %o0, %g7
8795+ subcc %g1, %o0, %g7
8796+
8797+#ifdef CONFIG_PAX_REFCOUNT
8798+ tvs %xcc, 6
8799+#endif
8800+
8801 casx [%o1], %g1, %g7
8802 cmp %g1, %g7
8803 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
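These atomic_64.S hunks are the sparc arm of PAX_REFCOUNT: each checked operation computes with addcc/subcc so the condition codes are set, then "tvs %icc, 6" (or %xcc for the 64-bit ops) traps on signed overflow, and trap level 6 is routed to pax_report_refcount_overflow() by the traps_64.c hunk earlier. The *_unchecked entry points keep the original wrap-allowed behaviour for counters that may legitimately overflow. A portable C sketch of the same checked/unchecked split, using the GCC/Clang __builtin_add_overflow in place of condition codes, with atomicity (the cas retry loop) omitted:

#include <stdio.h>
#include <stdlib.h>

/* Checked add: abort() stands in for the tvs trap. */
static int refcount_add_checked(int *v, int inc)
{
        int newv;

        if (__builtin_add_overflow(*v, inc, &newv)) {
                fprintf(stderr, "refcount overflow detected\n");
                abort();
        }
        return *v = newv;
}

/* Unchecked add: wraparound is intentional and well-defined on the
 * unsigned type (statistics counters and the like). */
static unsigned int counter_add_unchecked(unsigned int *v, unsigned int inc)
{
        return *v += inc;
}

int main(void)
{
        unsigned int stats = 0xffffffffu;
        int ref = 0x7fffffff;

        counter_add_unchecked(&stats, 1);       /* wraps quietly to 0 */
        refcount_add_checked(&ref, 1);          /* aborts here */
        return 0;
}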
8804diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
8805index 0c4e35e..745d3e4 100644
8806--- a/arch/sparc/lib/ksyms.c
8807+++ b/arch/sparc/lib/ksyms.c
8808@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
8809
8810 /* Atomic counter implementation. */
8811 EXPORT_SYMBOL(atomic_add);
8812+EXPORT_SYMBOL(atomic_add_unchecked);
8813 EXPORT_SYMBOL(atomic_add_ret);
8814+EXPORT_SYMBOL(atomic_add_ret_unchecked);
8815 EXPORT_SYMBOL(atomic_sub);
8816+EXPORT_SYMBOL(atomic_sub_unchecked);
8817 EXPORT_SYMBOL(atomic_sub_ret);
8818 EXPORT_SYMBOL(atomic64_add);
8819+EXPORT_SYMBOL(atomic64_add_unchecked);
8820 EXPORT_SYMBOL(atomic64_add_ret);
8821+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
8822 EXPORT_SYMBOL(atomic64_sub);
8823+EXPORT_SYMBOL(atomic64_sub_unchecked);
8824 EXPORT_SYMBOL(atomic64_sub_ret);
8825 EXPORT_SYMBOL(atomic64_dec_if_positive);
8826
8827diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
8828index 30c3ecc..736f015 100644
8829--- a/arch/sparc/mm/Makefile
8830+++ b/arch/sparc/mm/Makefile
8831@@ -2,7 +2,7 @@
8832 #
8833
8834 asflags-y := -ansi
8835-ccflags-y := -Werror
8836+#ccflags-y := -Werror
8837
8838 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
8839 obj-y += fault_$(BITS).o
8840diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
8841index e98bfda..ea8d221 100644
8842--- a/arch/sparc/mm/fault_32.c
8843+++ b/arch/sparc/mm/fault_32.c
8844@@ -21,6 +21,9 @@
8845 #include <linux/perf_event.h>
8846 #include <linux/interrupt.h>
8847 #include <linux/kdebug.h>
8848+#include <linux/slab.h>
8849+#include <linux/pagemap.h>
8850+#include <linux/compiler.h>
8851
8852 #include <asm/page.h>
8853 #include <asm/pgtable.h>
8854@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
8855 return safe_compute_effective_address(regs, insn);
8856 }
8857
8858+#ifdef CONFIG_PAX_PAGEEXEC
8859+#ifdef CONFIG_PAX_DLRESOLVE
8860+static void pax_emuplt_close(struct vm_area_struct *vma)
8861+{
8862+ vma->vm_mm->call_dl_resolve = 0UL;
8863+}
8864+
8865+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
8866+{
8867+ unsigned int *kaddr;
8868+
8869+ vmf->page = alloc_page(GFP_HIGHUSER);
8870+ if (!vmf->page)
8871+ return VM_FAULT_OOM;
8872+
8873+ kaddr = kmap(vmf->page);
8874+ memset(kaddr, 0, PAGE_SIZE);
8875+ kaddr[0] = 0x9DE3BFA8U; /* save */
8876+ flush_dcache_page(vmf->page);
8877+ kunmap(vmf->page);
8878+ return VM_FAULT_MAJOR;
8879+}
8880+
8881+static const struct vm_operations_struct pax_vm_ops = {
8882+ .close = pax_emuplt_close,
8883+ .fault = pax_emuplt_fault
8884+};
8885+
8886+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
8887+{
8888+ int ret;
8889+
8890+ INIT_LIST_HEAD(&vma->anon_vma_chain);
8891+ vma->vm_mm = current->mm;
8892+ vma->vm_start = addr;
8893+ vma->vm_end = addr + PAGE_SIZE;
8894+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
8895+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
8896+ vma->vm_ops = &pax_vm_ops;
8897+
8898+ ret = insert_vm_struct(current->mm, vma);
8899+ if (ret)
8900+ return ret;
8901+
8902+ ++current->mm->total_vm;
8903+ return 0;
8904+}
8905+#endif
8906+
8907+/*
8908+ * PaX: decide what to do with offenders (regs->pc = fault address)
8909+ *
8910+ * returns 1 when task should be killed
8911+ * 2 when patched PLT trampoline was detected
8912+ * 3 when unpatched PLT trampoline was detected
8913+ */
8914+static int pax_handle_fetch_fault(struct pt_regs *regs)
8915+{
8916+
8917+#ifdef CONFIG_PAX_EMUPLT
8918+ int err;
8919+
8920+ do { /* PaX: patched PLT emulation #1 */
8921+ unsigned int sethi1, sethi2, jmpl;
8922+
8923+ err = get_user(sethi1, (unsigned int *)regs->pc);
8924+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
8925+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
8926+
8927+ if (err)
8928+ break;
8929+
8930+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
8931+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
8932+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
8933+ {
8934+ unsigned int addr;
8935+
8936+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
8937+ addr = regs->u_regs[UREG_G1];
8938+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
8939+ regs->pc = addr;
8940+ regs->npc = addr+4;
8941+ return 2;
8942+ }
8943+ } while (0);
8944+
8945+ do { /* PaX: patched PLT emulation #2 */
8946+ unsigned int ba;
8947+
8948+ err = get_user(ba, (unsigned int *)regs->pc);
8949+
8950+ if (err)
8951+ break;
8952+
8953+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
8954+ unsigned int addr;
8955+
8956+ if ((ba & 0xFFC00000U) == 0x30800000U)
8957+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
8958+ else
8959+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
8960+ regs->pc = addr;
8961+ regs->npc = addr+4;
8962+ return 2;
8963+ }
8964+ } while (0);
8965+
8966+ do { /* PaX: patched PLT emulation #3 */
8967+ unsigned int sethi, bajmpl, nop;
8968+
8969+ err = get_user(sethi, (unsigned int *)regs->pc);
8970+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
8971+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
8972+
8973+ if (err)
8974+ break;
8975+
8976+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
8977+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
8978+ nop == 0x01000000U)
8979+ {
8980+ unsigned int addr;
8981+
8982+ addr = (sethi & 0x003FFFFFU) << 10;
8983+ regs->u_regs[UREG_G1] = addr;
8984+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
8985+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
8986+ else
8987+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
8988+ regs->pc = addr;
8989+ regs->npc = addr+4;
8990+ return 2;
8991+ }
8992+ } while (0);
8993+
8994+ do { /* PaX: unpatched PLT emulation step 1 */
8995+ unsigned int sethi, ba, nop;
8996+
8997+ err = get_user(sethi, (unsigned int *)regs->pc);
8998+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
8999+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9000+
9001+ if (err)
9002+ break;
9003+
9004+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9005+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9006+ nop == 0x01000000U)
9007+ {
9008+ unsigned int addr, save, call;
9009+
9010+ if ((ba & 0xFFC00000U) == 0x30800000U)
9011+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9012+ else
9013+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9014+
9015+ err = get_user(save, (unsigned int *)addr);
9016+ err |= get_user(call, (unsigned int *)(addr+4));
9017+ err |= get_user(nop, (unsigned int *)(addr+8));
9018+ if (err)
9019+ break;
9020+
9021+#ifdef CONFIG_PAX_DLRESOLVE
9022+ if (save == 0x9DE3BFA8U &&
9023+ (call & 0xC0000000U) == 0x40000000U &&
9024+ nop == 0x01000000U)
9025+ {
9026+ struct vm_area_struct *vma;
9027+ unsigned long call_dl_resolve;
9028+
9029+ down_read(&current->mm->mmap_sem);
9030+ call_dl_resolve = current->mm->call_dl_resolve;
9031+ up_read(&current->mm->mmap_sem);
9032+ if (likely(call_dl_resolve))
9033+ goto emulate;
9034+
9035+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9036+
9037+ down_write(&current->mm->mmap_sem);
9038+ if (current->mm->call_dl_resolve) {
9039+ call_dl_resolve = current->mm->call_dl_resolve;
9040+ up_write(&current->mm->mmap_sem);
9041+ if (vma)
9042+ kmem_cache_free(vm_area_cachep, vma);
9043+ goto emulate;
9044+ }
9045+
9046+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9047+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9048+ up_write(&current->mm->mmap_sem);
9049+ if (vma)
9050+ kmem_cache_free(vm_area_cachep, vma);
9051+ return 1;
9052+ }
9053+
9054+ if (pax_insert_vma(vma, call_dl_resolve)) {
9055+ up_write(&current->mm->mmap_sem);
9056+ kmem_cache_free(vm_area_cachep, vma);
9057+ return 1;
9058+ }
9059+
9060+ current->mm->call_dl_resolve = call_dl_resolve;
9061+ up_write(&current->mm->mmap_sem);
9062+
9063+emulate:
9064+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9065+ regs->pc = call_dl_resolve;
9066+ regs->npc = addr+4;
9067+ return 3;
9068+ }
9069+#endif
9070+
9071+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9072+ if ((save & 0xFFC00000U) == 0x05000000U &&
9073+ (call & 0xFFFFE000U) == 0x85C0A000U &&
9074+ nop == 0x01000000U)
9075+ {
9076+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9077+ regs->u_regs[UREG_G2] = addr + 4;
9078+ addr = (save & 0x003FFFFFU) << 10;
9079+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9080+ regs->pc = addr;
9081+ regs->npc = addr+4;
9082+ return 3;
9083+ }
9084+ }
9085+ } while (0);
9086+
9087+ do { /* PaX: unpatched PLT emulation step 2 */
9088+ unsigned int save, call, nop;
9089+
9090+ err = get_user(save, (unsigned int *)(regs->pc-4));
9091+ err |= get_user(call, (unsigned int *)regs->pc);
9092+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
9093+ if (err)
9094+ break;
9095+
9096+ if (save == 0x9DE3BFA8U &&
9097+ (call & 0xC0000000U) == 0x40000000U &&
9098+ nop == 0x01000000U)
9099+ {
9100+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
9101+
9102+ regs->u_regs[UREG_RETPC] = regs->pc;
9103+ regs->pc = dl_resolve;
9104+ regs->npc = dl_resolve+4;
9105+ return 3;
9106+ }
9107+ } while (0);
9108+#endif
9109+
9110+ return 1;
9111+}
9112+
9113+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9114+{
9115+ unsigned long i;
9116+
9117+ printk(KERN_ERR "PAX: bytes at PC: ");
9118+ for (i = 0; i < 8; i++) {
9119+ unsigned int c;
9120+ if (get_user(c, (unsigned int *)pc+i))
9121+ printk(KERN_CONT "???????? ");
9122+ else
9123+ printk(KERN_CONT "%08x ", c);
9124+ }
9125+ printk("\n");
9126+}
9127+#endif
9128+
9129 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
9130 int text_fault)
9131 {
9132@@ -230,6 +504,24 @@ good_area:
9133 if (!(vma->vm_flags & VM_WRITE))
9134 goto bad_area;
9135 } else {
9136+
9137+#ifdef CONFIG_PAX_PAGEEXEC
9138+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
9139+ up_read(&mm->mmap_sem);
9140+ switch (pax_handle_fetch_fault(regs)) {
9141+
9142+#ifdef CONFIG_PAX_EMUPLT
9143+ case 2:
9144+ case 3:
9145+ return;
9146+#endif
9147+
9148+ }
9149+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
9150+ do_group_exit(SIGKILL);
9151+ }
9152+#endif
9153+
9154 /* Allow reads even for write-only mappings */
9155 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
9156 goto bad_area;
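The fault_32.c additions implement PAX_PAGEEXEC with PLT emulation: when an instruction fetch faults on a non-executable page, the handler reads the faulting words back with get_user() and pattern-matches known PLT sequences, emulating them in the register state instead of executing from the page. The recurring bit arithmetic decodes fixed SPARC instruction fields; for instance, "(sethi & 0x003FFFFFU) << 10" extracts sethi's 22-bit immediate, which architecturally fills bits 31..10 of the destination register. A small self-checking sketch of that decode:

#include <assert.h>
#include <stdint.h>

/* sethi %hi(value), %rd encodes value>>10 in its low 22 bits. */
static uint32_t sethi_imm(uint32_t insn)
{
        return (insn & 0x003FFFFFu) << 10;
}

/* Opcode pattern from the checks above: op=00, rd=%g1, op2=100, i.e.
 * top bits equal 0x03000000 under the mask 0xFFC00000. */
static int is_sethi_g1(uint32_t insn)
{
        return (insn & 0xFFC00000u) == 0x03000000u;
}

int main(void)
{
        /* sethi %hi(0x12345400), %g1 == 0x03000000 | (0x12345400 >> 10) */
        uint32_t insn = 0x03000000u | (0x12345400u >> 10);

        assert(is_sethi_g1(insn));
        assert(sethi_imm(insn) == 0x12345400u);
        return 0;
}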
9157diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
9158index 5062ff3..e0b75f3 100644
9159--- a/arch/sparc/mm/fault_64.c
9160+++ b/arch/sparc/mm/fault_64.c
9161@@ -21,6 +21,9 @@
9162 #include <linux/kprobes.h>
9163 #include <linux/kdebug.h>
9164 #include <linux/percpu.h>
9165+#include <linux/slab.h>
9166+#include <linux/pagemap.h>
9167+#include <linux/compiler.h>
9168
9169 #include <asm/page.h>
9170 #include <asm/pgtable.h>
9171@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
9172 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
9173 regs->tpc);
9174 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
9175- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
9176+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
9177 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
9178 dump_stack();
9179 unhandled_fault(regs->tpc, current, regs);
9180@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
9181 show_regs(regs);
9182 }
9183
9184+#ifdef CONFIG_PAX_PAGEEXEC
9185+#ifdef CONFIG_PAX_DLRESOLVE
9186+static void pax_emuplt_close(struct vm_area_struct *vma)
9187+{
9188+ vma->vm_mm->call_dl_resolve = 0UL;
9189+}
9190+
9191+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9192+{
9193+ unsigned int *kaddr;
9194+
9195+ vmf->page = alloc_page(GFP_HIGHUSER);
9196+ if (!vmf->page)
9197+ return VM_FAULT_OOM;
9198+
9199+ kaddr = kmap(vmf->page);
9200+ memset(kaddr, 0, PAGE_SIZE);
9201+ kaddr[0] = 0x9DE3BFA8U; /* save */
9202+ flush_dcache_page(vmf->page);
9203+ kunmap(vmf->page);
9204+ return VM_FAULT_MAJOR;
9205+}
9206+
9207+static const struct vm_operations_struct pax_vm_ops = {
9208+ .close = pax_emuplt_close,
9209+ .fault = pax_emuplt_fault
9210+};
9211+
9212+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9213+{
9214+ int ret;
9215+
9216+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9217+ vma->vm_mm = current->mm;
9218+ vma->vm_start = addr;
9219+ vma->vm_end = addr + PAGE_SIZE;
9220+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9221+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9222+ vma->vm_ops = &pax_vm_ops;
9223+
9224+ ret = insert_vm_struct(current->mm, vma);
9225+ if (ret)
9226+ return ret;
9227+
9228+ ++current->mm->total_vm;
9229+ return 0;
9230+}
9231+#endif
9232+
9233+/*
9234+ * PaX: decide what to do with offenders (regs->tpc = fault address)
9235+ *
9236+ * returns 1 when task should be killed
9237+ * 2 when patched PLT trampoline was detected
9238+ * 3 when unpatched PLT trampoline was detected
9239+ */
9240+static int pax_handle_fetch_fault(struct pt_regs *regs)
9241+{
9242+
9243+#ifdef CONFIG_PAX_EMUPLT
9244+ int err;
9245+
9246+ do { /* PaX: patched PLT emulation #1 */
9247+ unsigned int sethi1, sethi2, jmpl;
9248+
9249+ err = get_user(sethi1, (unsigned int *)regs->tpc);
9250+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
9251+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
9252+
9253+ if (err)
9254+ break;
9255+
9256+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9257+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9258+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9259+ {
9260+ unsigned long addr;
9261+
9262+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9263+ addr = regs->u_regs[UREG_G1];
9264+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9265+
9266+ if (test_thread_flag(TIF_32BIT))
9267+ addr &= 0xFFFFFFFFUL;
9268+
9269+ regs->tpc = addr;
9270+ regs->tnpc = addr+4;
9271+ return 2;
9272+ }
9273+ } while (0);
9274+
9275+ do { /* PaX: patched PLT emulation #2 */
9276+ unsigned int ba;
9277+
9278+ err = get_user(ba, (unsigned int *)regs->tpc);
9279+
9280+ if (err)
9281+ break;
9282+
9283+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9284+ unsigned long addr;
9285+
9286+ if ((ba & 0xFFC00000U) == 0x30800000U)
9287+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9288+ else
9289+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9290+
9291+ if (test_thread_flag(TIF_32BIT))
9292+ addr &= 0xFFFFFFFFUL;
9293+
9294+ regs->tpc = addr;
9295+ regs->tnpc = addr+4;
9296+ return 2;
9297+ }
9298+ } while (0);
9299+
9300+ do { /* PaX: patched PLT emulation #3 */
9301+ unsigned int sethi, bajmpl, nop;
9302+
9303+ err = get_user(sethi, (unsigned int *)regs->tpc);
9304+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
9305+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9306+
9307+ if (err)
9308+ break;
9309+
9310+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9311+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9312+ nop == 0x01000000U)
9313+ {
9314+ unsigned long addr;
9315+
9316+ addr = (sethi & 0x003FFFFFU) << 10;
9317+ regs->u_regs[UREG_G1] = addr;
9318+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9319+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9320+ else
9321+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9322+
9323+ if (test_thread_flag(TIF_32BIT))
9324+ addr &= 0xFFFFFFFFUL;
9325+
9326+ regs->tpc = addr;
9327+ regs->tnpc = addr+4;
9328+ return 2;
9329+ }
9330+ } while (0);
9331+
9332+ do { /* PaX: patched PLT emulation #4 */
9333+ unsigned int sethi, mov1, call, mov2;
9334+
9335+ err = get_user(sethi, (unsigned int *)regs->tpc);
9336+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
9337+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
9338+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
9339+
9340+ if (err)
9341+ break;
9342+
9343+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9344+ mov1 == 0x8210000FU &&
9345+ (call & 0xC0000000U) == 0x40000000U &&
9346+ mov2 == 0x9E100001U)
9347+ {
9348+ unsigned long addr;
9349+
9350+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
9351+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9352+
9353+ if (test_thread_flag(TIF_32BIT))
9354+ addr &= 0xFFFFFFFFUL;
9355+
9356+ regs->tpc = addr;
9357+ regs->tnpc = addr+4;
9358+ return 2;
9359+ }
9360+ } while (0);
9361+
9362+ do { /* PaX: patched PLT emulation #5 */
9363+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
9364+
9365+ err = get_user(sethi, (unsigned int *)regs->tpc);
9366+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9367+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9368+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
9369+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
9370+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
9371+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
9372+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
9373+
9374+ if (err)
9375+ break;
9376+
9377+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9378+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
9379+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9380+ (or1 & 0xFFFFE000U) == 0x82106000U &&
9381+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
9382+ sllx == 0x83287020U &&
9383+ jmpl == 0x81C04005U &&
9384+ nop == 0x01000000U)
9385+ {
9386+ unsigned long addr;
9387+
9388+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9389+ regs->u_regs[UREG_G1] <<= 32;
9390+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9391+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9392+ regs->tpc = addr;
9393+ regs->tnpc = addr+4;
9394+ return 2;
9395+ }
9396+ } while (0);
9397+
9398+ do { /* PaX: patched PLT emulation #6 */
9399+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
9400+
9401+ err = get_user(sethi, (unsigned int *)regs->tpc);
9402+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9403+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9404+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
9405+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
9406+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
9407+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
9408+
9409+ if (err)
9410+ break;
9411+
9412+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9413+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
9414+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9415+ sllx == 0x83287020U &&
9416+ (or & 0xFFFFE000U) == 0x8A116000U &&
9417+ jmpl == 0x81C04005U &&
9418+ nop == 0x01000000U)
9419+ {
9420+ unsigned long addr;
9421+
9422+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
9423+ regs->u_regs[UREG_G1] <<= 32;
9424+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
9425+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9426+ regs->tpc = addr;
9427+ regs->tnpc = addr+4;
9428+ return 2;
9429+ }
9430+ } while (0);
9431+
9432+ do { /* PaX: unpatched PLT emulation step 1 */
9433+ unsigned int sethi, ba, nop;
9434+
9435+ err = get_user(sethi, (unsigned int *)regs->tpc);
9436+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
9437+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9438+
9439+ if (err)
9440+ break;
9441+
9442+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9443+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9444+ nop == 0x01000000U)
9445+ {
9446+ unsigned long addr;
9447+ unsigned int save, call;
9448+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
9449+
9450+ if ((ba & 0xFFC00000U) == 0x30800000U)
9451+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9452+ else
9453+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9454+
9455+ if (test_thread_flag(TIF_32BIT))
9456+ addr &= 0xFFFFFFFFUL;
9457+
9458+ err = get_user(save, (unsigned int *)addr);
9459+ err |= get_user(call, (unsigned int *)(addr+4));
9460+ err |= get_user(nop, (unsigned int *)(addr+8));
9461+ if (err)
9462+ break;
9463+
9464+#ifdef CONFIG_PAX_DLRESOLVE
9465+ if (save == 0x9DE3BFA8U &&
9466+ (call & 0xC0000000U) == 0x40000000U &&
9467+ nop == 0x01000000U)
9468+ {
9469+ struct vm_area_struct *vma;
9470+ unsigned long call_dl_resolve;
9471+
9472+ down_read(&current->mm->mmap_sem);
9473+ call_dl_resolve = current->mm->call_dl_resolve;
9474+ up_read(&current->mm->mmap_sem);
9475+ if (likely(call_dl_resolve))
9476+ goto emulate;
9477+
9478+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9479+
9480+ down_write(&current->mm->mmap_sem);
9481+ if (current->mm->call_dl_resolve) {
9482+ call_dl_resolve = current->mm->call_dl_resolve;
9483+ up_write(&current->mm->mmap_sem);
9484+ if (vma)
9485+ kmem_cache_free(vm_area_cachep, vma);
9486+ goto emulate;
9487+ }
9488+
9489+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9490+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9491+ up_write(&current->mm->mmap_sem);
9492+ if (vma)
9493+ kmem_cache_free(vm_area_cachep, vma);
9494+ return 1;
9495+ }
9496+
9497+ if (pax_insert_vma(vma, call_dl_resolve)) {
9498+ up_write(&current->mm->mmap_sem);
9499+ kmem_cache_free(vm_area_cachep, vma);
9500+ return 1;
9501+ }
9502+
9503+ current->mm->call_dl_resolve = call_dl_resolve;
9504+ up_write(&current->mm->mmap_sem);
9505+
9506+emulate:
9507+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9508+ regs->tpc = call_dl_resolve;
9509+ regs->tnpc = addr+4;
9510+ return 3;
9511+ }
9512+#endif
9513+
9514+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9515+ if ((save & 0xFFC00000U) == 0x05000000U &&
9516+ (call & 0xFFFFE000U) == 0x85C0A000U &&
9517+ nop == 0x01000000U)
9518+ {
9519+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9520+ regs->u_regs[UREG_G2] = addr + 4;
9521+ addr = (save & 0x003FFFFFU) << 10;
9522+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9523+
9524+ if (test_thread_flag(TIF_32BIT))
9525+ addr &= 0xFFFFFFFFUL;
9526+
9527+ regs->tpc = addr;
9528+ regs->tnpc = addr+4;
9529+ return 3;
9530+ }
9531+
9532+ /* PaX: 64-bit PLT stub */
9533+ err = get_user(sethi1, (unsigned int *)addr);
9534+ err |= get_user(sethi2, (unsigned int *)(addr+4));
9535+ err |= get_user(or1, (unsigned int *)(addr+8));
9536+ err |= get_user(or2, (unsigned int *)(addr+12));
9537+ err |= get_user(sllx, (unsigned int *)(addr+16));
9538+ err |= get_user(add, (unsigned int *)(addr+20));
9539+ err |= get_user(jmpl, (unsigned int *)(addr+24));
9540+ err |= get_user(nop, (unsigned int *)(addr+28));
9541+ if (err)
9542+ break;
9543+
9544+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
9545+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9546+ (or1 & 0xFFFFE000U) == 0x88112000U &&
9547+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
9548+ sllx == 0x89293020U &&
9549+ add == 0x8A010005U &&
9550+ jmpl == 0x89C14000U &&
9551+ nop == 0x01000000U)
9552+ {
9553+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9554+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9555+ regs->u_regs[UREG_G4] <<= 32;
9556+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9557+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
9558+ regs->u_regs[UREG_G4] = addr + 24;
9559+ addr = regs->u_regs[UREG_G5];
9560+ regs->tpc = addr;
9561+ regs->tnpc = addr+4;
9562+ return 3;
9563+ }
9564+ }
9565+ } while (0);
9566+
9567+#ifdef CONFIG_PAX_DLRESOLVE
9568+ do { /* PaX: unpatched PLT emulation step 2 */
9569+ unsigned int save, call, nop;
9570+
9571+ err = get_user(save, (unsigned int *)(regs->tpc-4));
9572+ err |= get_user(call, (unsigned int *)regs->tpc);
9573+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
9574+ if (err)
9575+ break;
9576+
9577+ if (save == 0x9DE3BFA8U &&
9578+ (call & 0xC0000000U) == 0x40000000U &&
9579+ nop == 0x01000000U)
9580+ {
9581+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9582+
9583+ if (test_thread_flag(TIF_32BIT))
9584+ dl_resolve &= 0xFFFFFFFFUL;
9585+
9586+ regs->u_regs[UREG_RETPC] = regs->tpc;
9587+ regs->tpc = dl_resolve;
9588+ regs->tnpc = dl_resolve+4;
9589+ return 3;
9590+ }
9591+ } while (0);
9592+#endif
9593+
9594+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
9595+ unsigned int sethi, ba, nop;
9596+
9597+ err = get_user(sethi, (unsigned int *)regs->tpc);
9598+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
9599+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9600+
9601+ if (err)
9602+ break;
9603+
9604+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9605+ (ba & 0xFFF00000U) == 0x30600000U &&
9606+ nop == 0x01000000U)
9607+ {
9608+ unsigned long addr;
9609+
9610+ addr = (sethi & 0x003FFFFFU) << 10;
9611+ regs->u_regs[UREG_G1] = addr;
9612+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9613+
9614+ if (test_thread_flag(TIF_32BIT))
9615+ addr &= 0xFFFFFFFFUL;
9616+
9617+ regs->tpc = addr;
9618+ regs->tnpc = addr+4;
9619+ return 2;
9620+ }
9621+ } while (0);
9622+
9623+#endif
9624+
9625+ return 1;
9626+}
9627+
9628+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9629+{
9630+ unsigned long i;
9631+
9632+ printk(KERN_ERR "PAX: bytes at PC: ");
9633+ for (i = 0; i < 8; i++) {
9634+ unsigned int c;
9635+ if (get_user(c, (unsigned int *)pc+i))
9636+ printk(KERN_CONT "???????? ");
9637+ else
9638+ printk(KERN_CONT "%08x ", c);
9639+ }
9640+ printk("\n");
9641+}
9642+#endif
9643+
9644 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
9645 {
9646 struct mm_struct *mm = current->mm;
9647@@ -341,6 +804,29 @@ retry:
9648 if (!vma)
9649 goto bad_area;
9650
9651+#ifdef CONFIG_PAX_PAGEEXEC
9652+ /* PaX: detect ITLB misses on non-exec pages */
9653+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
9654+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
9655+ {
9656+ if (address != regs->tpc)
9657+ goto good_area;
9658+
9659+ up_read(&mm->mmap_sem);
9660+ switch (pax_handle_fetch_fault(regs)) {
9661+
9662+#ifdef CONFIG_PAX_EMUPLT
9663+ case 2:
9664+ case 3:
9665+ return;
9666+#endif
9667+
9668+ }
9669+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
9670+ do_group_exit(SIGKILL);
9671+ }
9672+#endif
9673+
9674 /* Pure DTLB misses do not tell us whether the fault causing
9675 * load/store/atomic was a write or not, it only says that there
9676 * was no match. So in such a case we (carefully) read the
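fault_64.c repeats the same emulation against regs->tpc, and the other recurring idiom is branch-displacement decoding: "((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U" sign-extends the 22-bit disp22 field (pre-set the upper bits, then xor-and-add the sign bit so they clear again when the field was non-negative), after which the "<< 2" converts a word displacement to bytes before it is added to the PC. A checked sketch of the trick on 32-bit values:

#include <assert.h>
#include <stdint.h>

/* Sign-extend the low 22 bits (disp22) exactly as the emulation code
 * does: OR in the upper bits, then xor-and-add the sign bit so the
 * upper bits cancel back to zero when the field was non-negative. */
static uint32_t sext22(uint32_t insn)
{
        return ((insn | 0xFFC00000u) ^ 0x00200000u) + 0x00200000u;
}

int main(void)
{
        assert(sext22(0x00000010u) == 0x00000010u);     /* positive stays */
        assert(sext22(0x003FFFFFu) == 0xFFFFFFFFu);     /* -1 extends */
        assert(sext22(0x00200000u) == 0xFFE00000u);     /* most negative */
        return 0;
}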
9677diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
9678index d2b5944..bd813f2 100644
9679--- a/arch/sparc/mm/hugetlbpage.c
9680+++ b/arch/sparc/mm/hugetlbpage.c
9681@@ -38,7 +38,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
9682
9683 info.flags = 0;
9684 info.length = len;
9685- info.low_limit = TASK_UNMAPPED_BASE;
9686+ info.low_limit = mm->mmap_base;
9687 info.high_limit = min(task_size, VA_EXCLUDE_START);
9688 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
9689 info.align_offset = 0;
9690@@ -47,6 +47,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
9691 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
9692 VM_BUG_ON(addr != -ENOMEM);
9693 info.low_limit = VA_EXCLUDE_END;
9694+
9695+#ifdef CONFIG_PAX_RANDMMAP
9696+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9697+ info.low_limit += mm->delta_mmap;
9698+#endif
9699+
9700 info.high_limit = task_size;
9701 addr = vm_unmapped_area(&info);
9702 }
9703@@ -85,6 +91,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9704 VM_BUG_ON(addr != -ENOMEM);
9705 info.flags = 0;
9706 info.low_limit = TASK_UNMAPPED_BASE;
9707+
9708+#ifdef CONFIG_PAX_RANDMMAP
9709+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9710+ info.low_limit += mm->delta_mmap;
9711+#endif
9712+
9713 info.high_limit = STACK_TOP32;
9714 addr = vm_unmapped_area(&info);
9715 }
9716@@ -99,6 +111,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
9717 struct mm_struct *mm = current->mm;
9718 struct vm_area_struct *vma;
9719 unsigned long task_size = TASK_SIZE;
9720+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
9721
9722 if (test_thread_flag(TIF_32BIT))
9723 task_size = STACK_TOP32;
9724@@ -114,11 +127,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
9725 return addr;
9726 }
9727
9728+#ifdef CONFIG_PAX_RANDMMAP
9729+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9730+#endif
9731+
9732 if (addr) {
9733 addr = ALIGN(addr, HPAGE_SIZE);
9734 vma = find_vma(mm, addr);
9735- if (task_size - len >= addr &&
9736- (!vma || addr + len <= vma->vm_start))
9737+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9738 return addr;
9739 }
9740 if (mm->get_unmapped_area == arch_get_unmapped_area)
9741diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
9742index f4500c6..889656c 100644
9743--- a/arch/tile/include/asm/atomic_64.h
9744+++ b/arch/tile/include/asm/atomic_64.h
9745@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
9746
9747 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
9748
9749+#define atomic64_read_unchecked(v) atomic64_read(v)
9750+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
9751+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
9752+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
9753+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
9754+#define atomic64_inc_unchecked(v) atomic64_inc(v)
9755+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
9756+#define atomic64_dec_unchecked(v) atomic64_dec(v)
9757+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
9758+
9759 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
9760 #define smp_mb__before_atomic_dec() smp_mb()
9761 #define smp_mb__after_atomic_dec() smp_mb()
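For architectures whose atomics are not instrumented for PAX_REFCOUNT (tile here, as with several earlier in the patch), the *_unchecked names are defined as plain aliases of the stock operations. That keeps common code portable: a caller that genuinely wants wraparound writes atomic64_inc_unchecked() everywhere, and only instrumented architectures diverge. Illustrative shape, not tile's real headers:

#include <stdio.h>

typedef struct { unsigned long v; } my_atomic64_t;

static void my_atomic64_inc(my_atomic64_t *a) { a->v++; }

/* Uninstrumented arch: the _unchecked spelling is a free alias, so
 * generic code can always use it for intentionally-wrapping counters. */
#define my_atomic64_inc_unchecked(a)    my_atomic64_inc(a)

int main(void)
{
        my_atomic64_t n = { 0 };

        my_atomic64_inc_unchecked(&n);
        printf("%lu\n", n.v);
        return 0;
}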
9762diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
9763index a9a5299..0fce79e 100644
9764--- a/arch/tile/include/asm/cache.h
9765+++ b/arch/tile/include/asm/cache.h
9766@@ -15,11 +15,12 @@
9767 #ifndef _ASM_TILE_CACHE_H
9768 #define _ASM_TILE_CACHE_H
9769
9770+#include <linux/const.h>
9771 #include <arch/chip.h>
9772
9773 /* bytes per L1 data cache line */
9774 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
9775-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9776+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9777
9778 /* bytes per L2 cache line */
9779 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
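The cache.h change (repeated below for um and unicore32) swaps "(1 << L1_CACHE_SHIFT)" for "(_AC(1,UL) << L1_CACHE_SHIFT)". _AC(X,Y) from <linux/const.h> pastes the UL suffix onto the constant in C but drops it under __ASSEMBLY__, so one header serves both; the practical C-side effect is that L1_CACHE_BYTES becomes an unsigned long rather than a signed int, avoiding sign-extension surprises in size and mask arithmetic. A standalone sketch of the macro pair (MY_AC/MY__AC mirror the kernel's _AC/__AC):

#include <stdio.h>

/* In assembly the suffix must disappear; in C it must be pasted onto
 * the literal. The extra indirection lets X and Y expand first. */
#ifdef __ASSEMBLY__
#define MY_AC(X, Y)     X
#else
#define MY__AC(X, Y)    (X##Y)
#define MY_AC(X, Y)     MY__AC(X, Y)
#endif

#define L1_SHIFT        5
#define L1_BYTES        (MY_AC(1, UL) << L1_SHIFT)

int main(void)
{
        /* sizeof shows the constant is unsigned long, not int */
        printf("%zu %lu\n", sizeof(L1_BYTES), (unsigned long)L1_BYTES);
        return 0;
}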
9780diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
9781index 9ab078a..d6635c2 100644
9782--- a/arch/tile/include/asm/uaccess.h
9783+++ b/arch/tile/include/asm/uaccess.h
9784@@ -403,9 +403,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
9785 const void __user *from,
9786 unsigned long n)
9787 {
9788- int sz = __compiletime_object_size(to);
9789+ size_t sz = __compiletime_object_size(to);
9790
9791- if (likely(sz == -1 || sz >= n))
9792+ if (likely(sz == (size_t)-1 || sz >= n))
9793 n = _copy_from_user(to, from, n);
9794 else
9795 copy_from_user_overflow();
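The uaccess.h fix retypes sz from int to size_t: __compiletime_object_size() is built on __builtin_object_size(), which yields a size_t-flavoured value with (size_t)-1 meaning "unknown", so the sentinel comparison is now spelled "(size_t)-1" explicitly. A sketch of the hazard the retyping closes, assuming LP64 (64-bit unsigned long); the truncation shown is implementation-defined in C but matches common practice:

#include <stdio.h>

/* Stand-in for __compiletime_object_size(): pretend the compiler knows
 * the destination object is 6 GiB. */
static unsigned long object_size(void)
{
        return 6UL << 30;
}

int main(void)
{
        unsigned long n = 1UL << 40;            /* absurdly large copy */
        unsigned long sz = object_size();       /* fixed, size_t-style type */
        int sz_int = (int)object_size();        /* old type: truncates,
                                                   typically to INT_MIN here */

        /* Old test: the negative int is sign-extended to a huge unsigned
         * value by the usual arithmetic conversions, so the bounds check
         * can wrongly pass and skip the overflow handler. */
        printf("int test passes:    %d\n", sz_int == -1 || sz_int >= n);

        /* New test with an explicit unsigned sentinel stays correct. */
        printf("size_t test passes: %d\n", sz == (unsigned long)-1 || sz >= n);
        return 0;
}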
9796diff --git a/arch/um/Makefile b/arch/um/Makefile
9797index 133f7de..1d6f2f1 100644
9798--- a/arch/um/Makefile
9799+++ b/arch/um/Makefile
9800@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
9801 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
9802 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
9803
9804+ifdef CONSTIFY_PLUGIN
9805+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
9806+endif
9807+
9808 #This will adjust *FLAGS accordingly to the platform.
9809 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
9810
9811diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
9812index 19e1bdd..3665b77 100644
9813--- a/arch/um/include/asm/cache.h
9814+++ b/arch/um/include/asm/cache.h
9815@@ -1,6 +1,7 @@
9816 #ifndef __UM_CACHE_H
9817 #define __UM_CACHE_H
9818
9819+#include <linux/const.h>
9820
9821 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
9822 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9823@@ -12,6 +13,6 @@
9824 # define L1_CACHE_SHIFT 5
9825 #endif
9826
9827-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9828+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9829
9830 #endif
9831diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
9832index 2e0a6b1..a64d0f5 100644
9833--- a/arch/um/include/asm/kmap_types.h
9834+++ b/arch/um/include/asm/kmap_types.h
9835@@ -8,6 +8,6 @@
9836
9837 /* No more #include "asm/arch/kmap_types.h" ! */
9838
9839-#define KM_TYPE_NR 14
9840+#define KM_TYPE_NR 15
9841
9842 #endif
9843diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
9844index 5ff53d9..5850cdf 100644
9845--- a/arch/um/include/asm/page.h
9846+++ b/arch/um/include/asm/page.h
9847@@ -14,6 +14,9 @@
9848 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
9849 #define PAGE_MASK (~(PAGE_SIZE-1))
9850
9851+#define ktla_ktva(addr) (addr)
9852+#define ktva_ktla(addr) (addr)
9853+
9854 #ifndef __ASSEMBLY__
9855
9856 struct page;
9857diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
9858index 0032f92..cd151e0 100644
9859--- a/arch/um/include/asm/pgtable-3level.h
9860+++ b/arch/um/include/asm/pgtable-3level.h
9861@@ -58,6 +58,7 @@
9862 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
9863 #define pud_populate(mm, pud, pmd) \
9864 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
9865+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
9866
9867 #ifdef CONFIG_64BIT
9868 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
9869diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
9870index b462b13..e7a19aa 100644
9871--- a/arch/um/kernel/process.c
9872+++ b/arch/um/kernel/process.c
9873@@ -386,22 +386,6 @@ int singlestepping(void * t)
9874 return 2;
9875 }
9876
9877-/*
9878- * Only x86 and x86_64 have an arch_align_stack().
9879- * All other arches have "#define arch_align_stack(x) (x)"
9880- * in their asm/system.h
9881- * As this is included in UML from asm-um/system-generic.h,
9882- * we can use it to behave as the subarch does.
9883- */
9884-#ifndef arch_align_stack
9885-unsigned long arch_align_stack(unsigned long sp)
9886-{
9887- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
9888- sp -= get_random_int() % 8192;
9889- return sp & ~0xf;
9890-}
9891-#endif
9892-
9893 unsigned long get_wchan(struct task_struct *p)
9894 {
9895 unsigned long stack_page, sp, ip;
9896diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
9897index ad8f795..2c7eec6 100644
9898--- a/arch/unicore32/include/asm/cache.h
9899+++ b/arch/unicore32/include/asm/cache.h
9900@@ -12,8 +12,10 @@
9901 #ifndef __UNICORE_CACHE_H__
9902 #define __UNICORE_CACHE_H__
9903
9904-#define L1_CACHE_SHIFT (5)
9905-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9906+#include <linux/const.h>
9907+
9908+#define L1_CACHE_SHIFT 5
9909+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9910
9911 /*
9912 * Memory returned by kmalloc() may be used for DMA, so we must make
9913diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
9914index 0694d09..b58b3aa 100644
9915--- a/arch/x86/Kconfig
9916+++ b/arch/x86/Kconfig
9917@@ -238,7 +238,7 @@ config X86_HT
9918
9919 config X86_32_LAZY_GS
9920 def_bool y
9921- depends on X86_32 && !CC_STACKPROTECTOR
9922+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
9923
9924 config ARCH_HWEIGHT_CFLAGS
9925 string
9926@@ -1031,6 +1031,7 @@ config MICROCODE_OLD_INTERFACE
9927
9928 config X86_MSR
9929 tristate "/dev/cpu/*/msr - Model-specific register support"
9930+ depends on !GRKERNSEC_KMEM
9931 ---help---
9932 This device gives privileged processes access to the x86
9933 Model-Specific Registers (MSRs). It is a character device with
9934@@ -1054,7 +1055,7 @@ choice
9935
9936 config NOHIGHMEM
9937 bool "off"
9938- depends on !X86_NUMAQ
9939+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
9940 ---help---
9941 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
9942 However, the address space of 32-bit x86 processors is only 4
9943@@ -1091,7 +1092,7 @@ config NOHIGHMEM
9944
9945 config HIGHMEM4G
9946 bool "4GB"
9947- depends on !X86_NUMAQ
9948+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
9949 ---help---
9950 Select this if you have a 32-bit processor and between 1 and 4
9951 gigabytes of physical RAM.
9952@@ -1145,7 +1146,7 @@ config PAGE_OFFSET
9953 hex
9954 default 0xB0000000 if VMSPLIT_3G_OPT
9955 default 0x80000000 if VMSPLIT_2G
9956- default 0x78000000 if VMSPLIT_2G_OPT
9957+ default 0x70000000 if VMSPLIT_2G_OPT
9958 default 0x40000000 if VMSPLIT_1G
9959 default 0xC0000000
9960 depends on X86_32
9961@@ -1542,6 +1543,7 @@ config SECCOMP
9962
9963 config CC_STACKPROTECTOR
9964 bool "Enable -fstack-protector buffer overflow detection"
9965+ depends on X86_64 || !PAX_MEMORY_UDEREF
9966 ---help---
9967 This option turns on the -fstack-protector GCC feature. This
9968 feature puts, at the beginning of functions, a canary value on
9969@@ -1599,6 +1601,7 @@ config KEXEC_JUMP
9970 config PHYSICAL_START
9971 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
9972 default "0x1000000"
9973+ range 0x400000 0x40000000
9974 ---help---
9975 This gives the physical address where the kernel is loaded.
9976
9977@@ -1662,6 +1665,7 @@ config X86_NEED_RELOCS
9978 config PHYSICAL_ALIGN
9979 hex "Alignment value to which kernel should be aligned" if X86_32
9980 default "0x1000000"
9981+ range 0x400000 0x1000000 if PAX_KERNEXEC
9982 range 0x2000 0x1000000
9983 ---help---
9984 This value puts the alignment restrictions on physical address
9985@@ -1737,9 +1741,10 @@ config DEBUG_HOTPLUG_CPU0
9986 If unsure, say N.
9987
9988 config COMPAT_VDSO
9989- def_bool y
9990+ def_bool n
9991 prompt "Compat VDSO support"
9992 depends on X86_32 || IA32_EMULATION
9993+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
9994 ---help---
9995 Map the 32-bit VDSO to the predictable old-style address too.
9996
9997diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
9998index c026cca..14657ae 100644
9999--- a/arch/x86/Kconfig.cpu
10000+++ b/arch/x86/Kconfig.cpu
10001@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
10002
10003 config X86_F00F_BUG
10004 def_bool y
10005- depends on M586MMX || M586TSC || M586 || M486
10006+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
10007
10008 config X86_INVD_BUG
10009 def_bool y
10010@@ -327,7 +327,7 @@ config X86_INVD_BUG
10011
10012 config X86_ALIGNMENT_16
10013 def_bool y
10014- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10015+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10016
10017 config X86_INTEL_USERCOPY
10018 def_bool y
10019@@ -373,7 +373,7 @@ config X86_CMPXCHG64
10020 # generates cmov.
10021 config X86_CMOV
10022 def_bool y
10023- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10024+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10025
10026 config X86_MINIMUM_CPU_FAMILY
10027 int
10028diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
10029index b322f12..652d0d9 100644
10030--- a/arch/x86/Kconfig.debug
10031+++ b/arch/x86/Kconfig.debug
10032@@ -84,7 +84,7 @@ config X86_PTDUMP
10033 config DEBUG_RODATA
10034 bool "Write protect kernel read-only data structures"
10035 default y
10036- depends on DEBUG_KERNEL
10037+ depends on DEBUG_KERNEL && BROKEN
10038 ---help---
10039 Mark the kernel read-only data as write-protected in the pagetables,
10040 in order to catch accidental (and incorrect) writes to such const
10041@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
10042
10043 config DEBUG_SET_MODULE_RONX
10044 bool "Set loadable kernel module data as NX and text as RO"
10045- depends on MODULES
10046+ depends on MODULES && BROKEN
10047 ---help---
10048 This option helps catch unintended modifications to loadable
10049 kernel module's text and read-only data. It also prevents execution
10050@@ -294,7 +294,7 @@ config OPTIMIZE_INLINING
10051
10052 config DEBUG_STRICT_USER_COPY_CHECKS
10053 bool "Strict copy size checks"
10054- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
10055+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
10056 ---help---
10057 Enabling this option turns a certain set of sanity checks for user
10058 copy operations into compile time failures.
10059diff --git a/arch/x86/Makefile b/arch/x86/Makefile
10060index e71fc42..7829607 100644
10061--- a/arch/x86/Makefile
10062+++ b/arch/x86/Makefile
10063@@ -50,6 +50,7 @@ else
10064 UTS_MACHINE := x86_64
10065 CHECKFLAGS += -D__x86_64__ -m64
10066
10067+ biarch := $(call cc-option,-m64)
10068 KBUILD_AFLAGS += -m64
10069 KBUILD_CFLAGS += -m64
10070
10071@@ -230,3 +231,12 @@ define archhelp
10072 echo ' FDARGS="..." arguments for the booted kernel'
10073 echo ' FDINITRD=file initrd for the booted kernel'
10074 endef
10075+
10076+define OLD_LD
10077+
10078+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
10079+*** Please upgrade your binutils to 2.18 or newer
10080+endef
10081+
10082+archprepare:
10083+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
10084diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
10085index 379814b..add62ce 100644
10086--- a/arch/x86/boot/Makefile
10087+++ b/arch/x86/boot/Makefile
10088@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
10089 $(call cc-option, -fno-stack-protector) \
10090 $(call cc-option, -mpreferred-stack-boundary=2)
10091 KBUILD_CFLAGS += $(call cc-option, -m32)
10092+ifdef CONSTIFY_PLUGIN
10093+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10094+endif
10095 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10096 GCOV_PROFILE := n
10097
10098diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
10099index 878e4b9..20537ab 100644
10100--- a/arch/x86/boot/bitops.h
10101+++ b/arch/x86/boot/bitops.h
10102@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10103 u8 v;
10104 const u32 *p = (const u32 *)addr;
10105
10106- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10107+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10108 return v;
10109 }
10110
10111@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10112
10113 static inline void set_bit(int nr, void *addr)
10114 {
10115- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10116+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10117 }
10118
10119 #endif /* BOOT_BITOPS_H */
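This hunk, and the cpucheck.c ones further down, add volatile to asm statements: without it, GCC treats an asm with outputs as a pure function of its inputs and is free to CSE, hoist, or delete it. A standalone illustration of the hazard (rdtsc instead of the boot-code instructions, same principle):

#include <stdio.h>

/* rdtsc takes no inputs; without "volatile" GCC may fold two reads
 * into one, returning the same timestamp twice. */
static unsigned long long rdtsc_demo(void)
{
    unsigned int lo, hi;
    asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
    return ((unsigned long long)hi << 32) | lo;
}

int main(void)
{
    unsigned long long t0 = rdtsc_demo();
    unsigned long long t1 = rdtsc_demo();
    printf("delta: %llu cycles\n", t1 - t0);
    return 0;
}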
10120diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
10121index 18997e5..83d9c67 100644
10122--- a/arch/x86/boot/boot.h
10123+++ b/arch/x86/boot/boot.h
10124@@ -85,7 +85,7 @@ static inline void io_delay(void)
10125 static inline u16 ds(void)
10126 {
10127 u16 seg;
10128- asm("movw %%ds,%0" : "=rm" (seg));
10129+ asm volatile("movw %%ds,%0" : "=rm" (seg));
10130 return seg;
10131 }
10132
10133@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
10134 static inline int memcmp(const void *s1, const void *s2, size_t len)
10135 {
10136 u8 diff;
10137- asm("repe; cmpsb; setnz %0"
10138+ asm volatile("repe; cmpsb; setnz %0"
10139 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
10140 return diff;
10141 }
10142diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
10143index 8a84501..b2d165f 100644
10144--- a/arch/x86/boot/compressed/Makefile
10145+++ b/arch/x86/boot/compressed/Makefile
10146@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
10147 KBUILD_CFLAGS += $(cflags-y)
10148 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
10149 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
10150+ifdef CONSTIFY_PLUGIN
10151+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10152+endif
10153
10154 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10155 GCOV_PROFILE := n
10156diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
10157index c205035..5853587 100644
10158--- a/arch/x86/boot/compressed/eboot.c
10159+++ b/arch/x86/boot/compressed/eboot.c
10160@@ -150,7 +150,6 @@ again:
10161 *addr = max_addr;
10162 }
10163
10164-free_pool:
10165 efi_call_phys1(sys_table->boottime->free_pool, map);
10166
10167 fail:
10168@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
10169 if (i == map_size / desc_size)
10170 status = EFI_NOT_FOUND;
10171
10172-free_pool:
10173 efi_call_phys1(sys_table->boottime->free_pool, map);
10174 fail:
10175 return status;
10176diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
10177index 1e3184f..0d11e2e 100644
10178--- a/arch/x86/boot/compressed/head_32.S
10179+++ b/arch/x86/boot/compressed/head_32.S
10180@@ -118,7 +118,7 @@ preferred_addr:
10181 notl %eax
10182 andl %eax, %ebx
10183 #else
10184- movl $LOAD_PHYSICAL_ADDR, %ebx
10185+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10186 #endif
10187
10188 /* Target address to relocate to for decompression */
10189@@ -204,7 +204,7 @@ relocated:
10190 * and where it was actually loaded.
10191 */
10192 movl %ebp, %ebx
10193- subl $LOAD_PHYSICAL_ADDR, %ebx
10194+ subl $____LOAD_PHYSICAL_ADDR, %ebx
10195 jz 2f /* Nothing to be done if loaded at compiled addr. */
10196 /*
10197 * Process relocations.
10198@@ -212,8 +212,7 @@ relocated:
10199
10200 1: subl $4, %edi
10201 movl (%edi), %ecx
10202- testl %ecx, %ecx
10203- jz 2f
10204+ jecxz 2f
10205 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
10206 jmp 1b
10207 2:
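The relocation loop above replaces testl %ecx,%ecx; jz with jecxz, which branches on %ecx being zero without reading or writing EFLAGS. A small sketch of the instruction's semantics (only a rel8 target is allowed, so the branch must be short):

#include <stdio.h>

static int is_zero(unsigned int x)
{
    int r;
    asm("jecxz 1f\n\t"
        "movl $0, %0\n\t"
        "jmp 2f\n"
        "1:\tmovl $1, %0\n"
        "2:"
        : "=r"(r)
        : "c"(x));      /* force x into %ecx, which jecxz tests */
    return r;
}

int main(void)
{
    printf("%d %d\n", is_zero(0), is_zero(7));   /* prints: 1 0 */
    return 0;
}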
10208diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
10209index f5d1aaa..cce11dc 100644
10210--- a/arch/x86/boot/compressed/head_64.S
10211+++ b/arch/x86/boot/compressed/head_64.S
10212@@ -91,7 +91,7 @@ ENTRY(startup_32)
10213 notl %eax
10214 andl %eax, %ebx
10215 #else
10216- movl $LOAD_PHYSICAL_ADDR, %ebx
10217+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10218 #endif
10219
10220 /* Target address to relocate to for decompression */
10221@@ -273,7 +273,7 @@ preferred_addr:
10222 notq %rax
10223 andq %rax, %rbp
10224 #else
10225- movq $LOAD_PHYSICAL_ADDR, %rbp
10226+ movq $____LOAD_PHYSICAL_ADDR, %rbp
10227 #endif
10228
10229 /* Target address to relocate to for decompression */
10230diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
10231index 88f7ff6..ed695dd 100644
10232--- a/arch/x86/boot/compressed/misc.c
10233+++ b/arch/x86/boot/compressed/misc.c
10234@@ -303,7 +303,7 @@ static void parse_elf(void *output)
10235 case PT_LOAD:
10236 #ifdef CONFIG_RELOCATABLE
10237 dest = output;
10238- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
10239+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
10240 #else
10241 dest = (void *)(phdr->p_paddr);
10242 #endif
10243@@ -352,7 +352,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
10244 error("Destination address too large");
10245 #endif
10246 #ifndef CONFIG_RELOCATABLE
10247- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
10248+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
10249 error("Wrong destination address");
10250 #endif
10251
10252diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
10253index 4d3ff03..e4972ff 100644
10254--- a/arch/x86/boot/cpucheck.c
10255+++ b/arch/x86/boot/cpucheck.c
10256@@ -74,7 +74,7 @@ static int has_fpu(void)
10257 u16 fcw = -1, fsw = -1;
10258 u32 cr0;
10259
10260- asm("movl %%cr0,%0" : "=r" (cr0));
10261+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
10262 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
10263 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
10264 asm volatile("movl %0,%%cr0" : : "r" (cr0));
10265@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
10266 {
10267 u32 f0, f1;
10268
10269- asm("pushfl ; "
10270+ asm volatile("pushfl ; "
10271 "pushfl ; "
10272 "popl %0 ; "
10273 "movl %0,%1 ; "
10274@@ -115,7 +115,7 @@ static void get_flags(void)
10275 set_bit(X86_FEATURE_FPU, cpu.flags);
10276
10277 if (has_eflag(X86_EFLAGS_ID)) {
10278- asm("cpuid"
10279+ asm volatile("cpuid"
10280 : "=a" (max_intel_level),
10281 "=b" (cpu_vendor[0]),
10282 "=d" (cpu_vendor[1]),
10283@@ -124,7 +124,7 @@ static void get_flags(void)
10284
10285 if (max_intel_level >= 0x00000001 &&
10286 max_intel_level <= 0x0000ffff) {
10287- asm("cpuid"
10288+ asm volatile("cpuid"
10289 : "=a" (tfms),
10290 "=c" (cpu.flags[4]),
10291 "=d" (cpu.flags[0])
10292@@ -136,7 +136,7 @@ static void get_flags(void)
10293 cpu.model += ((tfms >> 16) & 0xf) << 4;
10294 }
10295
10296- asm("cpuid"
10297+ asm volatile("cpuid"
10298 : "=a" (max_amd_level)
10299 : "a" (0x80000000)
10300 : "ebx", "ecx", "edx");
10301@@ -144,7 +144,7 @@ static void get_flags(void)
10302 if (max_amd_level >= 0x80000001 &&
10303 max_amd_level <= 0x8000ffff) {
10304 u32 eax = 0x80000001;
10305- asm("cpuid"
10306+ asm volatile("cpuid"
10307 : "+a" (eax),
10308 "=c" (cpu.flags[6]),
10309 "=d" (cpu.flags[1])
10310@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10311 u32 ecx = MSR_K7_HWCR;
10312 u32 eax, edx;
10313
10314- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10315+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10316 eax &= ~(1 << 15);
10317- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10318+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10319
10320 get_flags(); /* Make sure it really did something */
10321 err = check_flags();
10322@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10323 u32 ecx = MSR_VIA_FCR;
10324 u32 eax, edx;
10325
10326- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10327+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10328 eax |= (1<<1)|(1<<7);
10329- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10330+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10331
10332 set_bit(X86_FEATURE_CX8, cpu.flags);
10333 err = check_flags();
10334@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10335 u32 eax, edx;
10336 u32 level = 1;
10337
10338- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10339- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10340- asm("cpuid"
10341+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10342+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10343+ asm volatile("cpuid"
10344 : "+a" (level), "=d" (cpu.flags[0])
10345 : : "ecx", "ebx");
10346- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10347+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10348
10349 err = check_flags();
10350 }
10351diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
10352index 944ce59..87ee37a 100644
10353--- a/arch/x86/boot/header.S
10354+++ b/arch/x86/boot/header.S
10355@@ -401,10 +401,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
10356 # single linked list of
10357 # struct setup_data
10358
10359-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
10360+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
10361
10362 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
10363+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10364+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
10365+#else
10366 #define VO_INIT_SIZE (VO__end - VO__text)
10367+#endif
10368 #if ZO_INIT_SIZE > VO_INIT_SIZE
10369 #define INIT_SIZE ZO_INIT_SIZE
10370 #else
10371diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
10372index db75d07..8e6d0af 100644
10373--- a/arch/x86/boot/memory.c
10374+++ b/arch/x86/boot/memory.c
10375@@ -19,7 +19,7 @@
10376
10377 static int detect_memory_e820(void)
10378 {
10379- int count = 0;
10380+ unsigned int count = 0;
10381 struct biosregs ireg, oreg;
10382 struct e820entry *desc = boot_params.e820_map;
10383 static struct e820entry buf; /* static so it is zeroed */
10384diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
10385index 11e8c6e..fdbb1ed 100644
10386--- a/arch/x86/boot/video-vesa.c
10387+++ b/arch/x86/boot/video-vesa.c
10388@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
10389
10390 boot_params.screen_info.vesapm_seg = oreg.es;
10391 boot_params.screen_info.vesapm_off = oreg.di;
10392+ boot_params.screen_info.vesapm_size = oreg.cx;
10393 }
10394
10395 /*
10396diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
10397index 43eda28..5ab5fdb 100644
10398--- a/arch/x86/boot/video.c
10399+++ b/arch/x86/boot/video.c
10400@@ -96,7 +96,7 @@ static void store_mode_params(void)
10401 static unsigned int get_entry(void)
10402 {
10403 char entry_buf[4];
10404- int i, len = 0;
10405+ unsigned int i, len = 0;
10406 int key;
10407 unsigned int v;
10408
10409diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
10410index 5b577d5..3c1fed4 100644
10411--- a/arch/x86/crypto/aes-x86_64-asm_64.S
10412+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
10413@@ -8,6 +8,8 @@
10414 * including this sentence is retained in full.
10415 */
10416
10417+#include <asm/alternative-asm.h>
10418+
10419 .extern crypto_ft_tab
10420 .extern crypto_it_tab
10421 .extern crypto_fl_tab
10422@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
10423 je B192; \
10424 leaq 32(r9),r9;
10425
10426+#define ret pax_force_retaddr 0, 1; ret
10427+
10428 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
10429 movq r1,r2; \
10430 movq r3,r4; \
10431diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
10432index 3470624..201259d 100644
10433--- a/arch/x86/crypto/aesni-intel_asm.S
10434+++ b/arch/x86/crypto/aesni-intel_asm.S
10435@@ -31,6 +31,7 @@
10436
10437 #include <linux/linkage.h>
10438 #include <asm/inst.h>
10439+#include <asm/alternative-asm.h>
10440
10441 #ifdef __x86_64__
10442 .data
10443@@ -1436,7 +1437,9 @@ _return_T_done_decrypt:
10444 pop %r14
10445 pop %r13
10446 pop %r12
10447+ pax_force_retaddr 0, 1
10448 ret
10449+ENDPROC(aesni_gcm_dec)
10450
10451
10452 /*****************************************************************************
10453@@ -1699,7 +1702,9 @@ _return_T_done_encrypt:
10454 pop %r14
10455 pop %r13
10456 pop %r12
10457+ pax_force_retaddr 0, 1
10458 ret
10459+ENDPROC(aesni_gcm_enc)
10460
10461 #endif
10462
10463@@ -1714,6 +1719,7 @@ _key_expansion_256a:
10464 pxor %xmm1, %xmm0
10465 movaps %xmm0, (TKEYP)
10466 add $0x10, TKEYP
10467+ pax_force_retaddr_bts
10468 ret
10469
10470 .align 4
10471@@ -1738,6 +1744,7 @@ _key_expansion_192a:
10472 shufps $0b01001110, %xmm2, %xmm1
10473 movaps %xmm1, 0x10(TKEYP)
10474 add $0x20, TKEYP
10475+ pax_force_retaddr_bts
10476 ret
10477
10478 .align 4
10479@@ -1757,6 +1764,7 @@ _key_expansion_192b:
10480
10481 movaps %xmm0, (TKEYP)
10482 add $0x10, TKEYP
10483+ pax_force_retaddr_bts
10484 ret
10485
10486 .align 4
10487@@ -1769,6 +1777,7 @@ _key_expansion_256b:
10488 pxor %xmm1, %xmm2
10489 movaps %xmm2, (TKEYP)
10490 add $0x10, TKEYP
10491+ pax_force_retaddr_bts
10492 ret
10493
10494 /*
10495@@ -1881,7 +1890,9 @@ ENTRY(aesni_set_key)
10496 #ifndef __x86_64__
10497 popl KEYP
10498 #endif
10499+ pax_force_retaddr 0, 1
10500 ret
10501+ENDPROC(aesni_set_key)
10502
10503 /*
10504 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
10505@@ -1902,7 +1913,9 @@ ENTRY(aesni_enc)
10506 popl KLEN
10507 popl KEYP
10508 #endif
10509+ pax_force_retaddr 0, 1
10510 ret
10511+ENDPROC(aesni_enc)
10512
10513 /*
10514 * _aesni_enc1: internal ABI
10515@@ -1959,6 +1972,7 @@ _aesni_enc1:
10516 AESENC KEY STATE
10517 movaps 0x70(TKEYP), KEY
10518 AESENCLAST KEY STATE
10519+ pax_force_retaddr_bts
10520 ret
10521
10522 /*
10523@@ -2067,6 +2081,7 @@ _aesni_enc4:
10524 AESENCLAST KEY STATE2
10525 AESENCLAST KEY STATE3
10526 AESENCLAST KEY STATE4
10527+ pax_force_retaddr_bts
10528 ret
10529
10530 /*
10531@@ -2089,7 +2104,9 @@ ENTRY(aesni_dec)
10532 popl KLEN
10533 popl KEYP
10534 #endif
10535+ pax_force_retaddr 0, 1
10536 ret
10537+ENDPROC(aesni_dec)
10538
10539 /*
10540 * _aesni_dec1: internal ABI
10541@@ -2146,6 +2163,7 @@ _aesni_dec1:
10542 AESDEC KEY STATE
10543 movaps 0x70(TKEYP), KEY
10544 AESDECLAST KEY STATE
10545+ pax_force_retaddr_bts
10546 ret
10547
10548 /*
10549@@ -2254,6 +2272,7 @@ _aesni_dec4:
10550 AESDECLAST KEY STATE2
10551 AESDECLAST KEY STATE3
10552 AESDECLAST KEY STATE4
10553+ pax_force_retaddr_bts
10554 ret
10555
10556 /*
10557@@ -2311,7 +2330,9 @@ ENTRY(aesni_ecb_enc)
10558 popl KEYP
10559 popl LEN
10560 #endif
10561+ pax_force_retaddr 0, 1
10562 ret
10563+ENDPROC(aesni_ecb_enc)
10564
10565 /*
10566 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
10567@@ -2369,7 +2390,9 @@ ENTRY(aesni_ecb_dec)
10568 popl KEYP
10569 popl LEN
10570 #endif
10571+ pax_force_retaddr 0, 1
10572 ret
10573+ENDPROC(aesni_ecb_dec)
10574
10575 /*
10576 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
10577@@ -2410,7 +2433,9 @@ ENTRY(aesni_cbc_enc)
10578 popl LEN
10579 popl IVP
10580 #endif
10581+ pax_force_retaddr 0, 1
10582 ret
10583+ENDPROC(aesni_cbc_enc)
10584
10585 /*
10586 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
10587@@ -2500,7 +2525,9 @@ ENTRY(aesni_cbc_dec)
10588 popl LEN
10589 popl IVP
10590 #endif
10591+ pax_force_retaddr 0, 1
10592 ret
10593+ENDPROC(aesni_cbc_dec)
10594
10595 #ifdef __x86_64__
10596 .align 16
10597@@ -2526,6 +2553,7 @@ _aesni_inc_init:
10598 mov $1, TCTR_LOW
10599 MOVQ_R64_XMM TCTR_LOW INC
10600 MOVQ_R64_XMM CTR TCTR_LOW
10601+ pax_force_retaddr_bts
10602 ret
10603
10604 /*
10605@@ -2554,6 +2582,7 @@ _aesni_inc:
10606 .Linc_low:
10607 movaps CTR, IV
10608 PSHUFB_XMM BSWAP_MASK IV
10609+ pax_force_retaddr_bts
10610 ret
10611
10612 /*
10613@@ -2614,5 +2643,7 @@ ENTRY(aesni_ctr_enc)
10614 .Lctr_enc_ret:
10615 movups IV, (IVP)
10616 .Lctr_enc_just_ret:
10617+ pax_force_retaddr 0, 1
10618 ret
10619+ENDPROC(aesni_ctr_enc)
10620 #endif
10621diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
10622index 391d245..67f35c2 100644
10623--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
10624+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
10625@@ -20,6 +20,8 @@
10626 *
10627 */
10628
10629+#include <asm/alternative-asm.h>
10630+
10631 .file "blowfish-x86_64-asm.S"
10632 .text
10633
10634@@ -151,9 +153,11 @@ __blowfish_enc_blk:
10635 jnz __enc_xor;
10636
10637 write_block();
10638+ pax_force_retaddr 0, 1
10639 ret;
10640 __enc_xor:
10641 xor_block();
10642+ pax_force_retaddr 0, 1
10643 ret;
10644
10645 .align 8
10646@@ -188,6 +192,7 @@ blowfish_dec_blk:
10647
10648 movq %r11, %rbp;
10649
10650+ pax_force_retaddr 0, 1
10651 ret;
10652
10653 /**********************************************************************
10654@@ -342,6 +347,7 @@ __blowfish_enc_blk_4way:
10655
10656 popq %rbx;
10657 popq %rbp;
10658+ pax_force_retaddr 0, 1
10659 ret;
10660
10661 __enc_xor4:
10662@@ -349,6 +355,7 @@ __enc_xor4:
10663
10664 popq %rbx;
10665 popq %rbp;
10666+ pax_force_retaddr 0, 1
10667 ret;
10668
10669 .align 8
10670@@ -386,5 +393,6 @@ blowfish_dec_blk_4way:
10671 popq %rbx;
10672 popq %rbp;
10673
10674+ pax_force_retaddr 0, 1
10675 ret;
10676
10677diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
10678index 0b33743..7a56206 100644
10679--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
10680+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
10681@@ -20,6 +20,8 @@
10682 *
10683 */
10684
10685+#include <asm/alternative-asm.h>
10686+
10687 .file "camellia-x86_64-asm_64.S"
10688 .text
10689
10690@@ -229,12 +231,14 @@ __enc_done:
10691 enc_outunpack(mov, RT1);
10692
10693 movq RRBP, %rbp;
10694+ pax_force_retaddr 0, 1
10695 ret;
10696
10697 __enc_xor:
10698 enc_outunpack(xor, RT1);
10699
10700 movq RRBP, %rbp;
10701+ pax_force_retaddr 0, 1
10702 ret;
10703
10704 .global camellia_dec_blk;
10705@@ -275,6 +279,7 @@ __dec_rounds16:
10706 dec_outunpack();
10707
10708 movq RRBP, %rbp;
10709+ pax_force_retaddr 0, 1
10710 ret;
10711
10712 /**********************************************************************
10713@@ -468,6 +473,7 @@ __enc2_done:
10714
10715 movq RRBP, %rbp;
10716 popq %rbx;
10717+ pax_force_retaddr 0, 1
10718 ret;
10719
10720 __enc2_xor:
10721@@ -475,6 +481,7 @@ __enc2_xor:
10722
10723 movq RRBP, %rbp;
10724 popq %rbx;
10725+ pax_force_retaddr 0, 1
10726 ret;
10727
10728 .global camellia_dec_blk_2way;
10729@@ -517,4 +524,5 @@ __dec2_rounds16:
10730
10731 movq RRBP, %rbp;
10732 movq RXOR, %rbx;
10733+ pax_force_retaddr 0, 1
10734 ret;
10735diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
10736index 15b00ac..2071784 100644
10737--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
10738+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
10739@@ -23,6 +23,8 @@
10740 *
10741 */
10742
10743+#include <asm/alternative-asm.h>
10744+
10745 .file "cast5-avx-x86_64-asm_64.S"
10746
10747 .extern cast_s1
10748@@ -281,6 +283,7 @@ __skip_enc:
10749 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
10750 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
10751
10752+ pax_force_retaddr 0, 1
10753 ret;
10754
10755 .align 16
10756@@ -353,6 +356,7 @@ __dec_tail:
10757 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
10758 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
10759
10760+ pax_force_retaddr 0, 1
10761 ret;
10762
10763 __skip_dec:
10764@@ -392,6 +396,7 @@ cast5_ecb_enc_16way:
10765 vmovdqu RR4, (6*4*4)(%r11);
10766 vmovdqu RL4, (7*4*4)(%r11);
10767
10768+ pax_force_retaddr
10769 ret;
10770
10771 .align 16
10772@@ -427,6 +432,7 @@ cast5_ecb_dec_16way:
10773 vmovdqu RR4, (6*4*4)(%r11);
10774 vmovdqu RL4, (7*4*4)(%r11);
10775
10776+ pax_force_retaddr
10777 ret;
10778
10779 .align 16
10780@@ -479,6 +485,7 @@ cast5_cbc_dec_16way:
10781
10782 popq %r12;
10783
10784+ pax_force_retaddr
10785 ret;
10786
10787 .align 16
10788@@ -555,4 +562,5 @@ cast5_ctr_16way:
10789
10790 popq %r12;
10791
10792+ pax_force_retaddr
10793 ret;
10794diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
10795index 2569d0d..637c289 100644
10796--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
10797+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
10798@@ -23,6 +23,8 @@
10799 *
10800 */
10801
10802+#include <asm/alternative-asm.h>
10803+
10804 #include "glue_helper-asm-avx.S"
10805
10806 .file "cast6-avx-x86_64-asm_64.S"
10807@@ -294,6 +296,7 @@ __cast6_enc_blk8:
10808 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
10809 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
10810
10811+ pax_force_retaddr 0, 1
10812 ret;
10813
10814 .align 8
10815@@ -340,6 +343,7 @@ __cast6_dec_blk8:
10816 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
10817 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
10818
10819+ pax_force_retaddr 0, 1
10820 ret;
10821
10822 .align 8
10823@@ -361,6 +365,7 @@ cast6_ecb_enc_8way:
10824
10825 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
10826
10827+ pax_force_retaddr
10828 ret;
10829
10830 .align 8
10831@@ -382,6 +387,7 @@ cast6_ecb_dec_8way:
10832
10833 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
10834
10835+ pax_force_retaddr
10836 ret;
10837
10838 .align 8
10839@@ -408,6 +414,7 @@ cast6_cbc_dec_8way:
10840
10841 popq %r12;
10842
10843+ pax_force_retaddr
10844 ret;
10845
10846 .align 8
10847@@ -436,4 +443,5 @@ cast6_ctr_8way:
10848
10849 popq %r12;
10850
10851+ pax_force_retaddr
10852 ret;
10853diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
10854index 6214a9b..1f4fc9a 100644
10855--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
10856+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
10857@@ -1,3 +1,5 @@
10858+#include <asm/alternative-asm.h>
10859+
10860 # enter ECRYPT_encrypt_bytes
10861 .text
10862 .p2align 5
10863@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
10864 add %r11,%rsp
10865 mov %rdi,%rax
10866 mov %rsi,%rdx
10867+ pax_force_retaddr 0, 1
10868 ret
10869 # bytesatleast65:
10870 ._bytesatleast65:
10871@@ -891,6 +894,7 @@ ECRYPT_keysetup:
10872 add %r11,%rsp
10873 mov %rdi,%rax
10874 mov %rsi,%rdx
10875+ pax_force_retaddr
10876 ret
10877 # enter ECRYPT_ivsetup
10878 .text
10879@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
10880 add %r11,%rsp
10881 mov %rdi,%rax
10882 mov %rsi,%rdx
10883+ pax_force_retaddr
10884 ret
10885diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
10886index 02b0e9f..cf4cf5c 100644
10887--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
10888+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
10889@@ -24,6 +24,8 @@
10890 *
10891 */
10892
10893+#include <asm/alternative-asm.h>
10894+
10895 #include "glue_helper-asm-avx.S"
10896
10897 .file "serpent-avx-x86_64-asm_64.S"
10898@@ -618,6 +620,7 @@ __serpent_enc_blk8_avx:
10899 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
10900 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
10901
10902+ pax_force_retaddr
10903 ret;
10904
10905 .align 8
10906@@ -673,6 +676,7 @@ __serpent_dec_blk8_avx:
10907 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
10908 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
10909
10910+ pax_force_retaddr
10911 ret;
10912
10913 .align 8
10914@@ -692,6 +696,7 @@ serpent_ecb_enc_8way_avx:
10915
10916 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
10917
10918+ pax_force_retaddr
10919 ret;
10920
10921 .align 8
10922@@ -711,6 +716,7 @@ serpent_ecb_dec_8way_avx:
10923
10924 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
10925
10926+ pax_force_retaddr
10927 ret;
10928
10929 .align 8
10930@@ -730,6 +736,7 @@ serpent_cbc_dec_8way_avx:
10931
10932 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
10933
10934+ pax_force_retaddr
10935 ret;
10936
10937 .align 8
10938@@ -751,4 +758,5 @@ serpent_ctr_8way_avx:
10939
10940 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
10941
10942+ pax_force_retaddr
10943 ret;
10944diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
10945index 3ee1ff0..cbc568b 100644
10946--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
10947+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
10948@@ -24,6 +24,8 @@
10949 *
10950 */
10951
10952+#include <asm/alternative-asm.h>
10953+
10954 .file "serpent-sse2-x86_64-asm_64.S"
10955 .text
10956
10957@@ -692,12 +694,14 @@ __serpent_enc_blk_8way:
10958 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
10959 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
10960
10961+ pax_force_retaddr
10962 ret;
10963
10964 __enc_xor8:
10965 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
10966 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
10967
10968+ pax_force_retaddr
10969 ret;
10970
10971 .align 8
10972@@ -755,4 +759,5 @@ serpent_dec_blk_8way:
10973 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
10974 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
10975
10976+ pax_force_retaddr
10977 ret;
10978diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
10979index 49d6987..df66bd4 100644
10980--- a/arch/x86/crypto/sha1_ssse3_asm.S
10981+++ b/arch/x86/crypto/sha1_ssse3_asm.S
10982@@ -28,6 +28,8 @@
10983 * (at your option) any later version.
10984 */
10985
10986+#include <asm/alternative-asm.h>
10987+
10988 #define CTX %rdi // arg1
10989 #define BUF %rsi // arg2
10990 #define CNT %rdx // arg3
10991@@ -104,6 +106,7 @@
10992 pop %r12
10993 pop %rbp
10994 pop %rbx
10995+ pax_force_retaddr 0, 1
10996 ret
10997
10998 .size \name, .-\name
10999diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11000index ebac16b..8092eb9 100644
11001--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11002+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11003@@ -23,6 +23,8 @@
11004 *
11005 */
11006
11007+#include <asm/alternative-asm.h>
11008+
11009 #include "glue_helper-asm-avx.S"
11010
11011 .file "twofish-avx-x86_64-asm_64.S"
11012@@ -283,6 +285,7 @@ __twofish_enc_blk8:
11013 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
11014 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
11015
11016+ pax_force_retaddr 0, 1
11017 ret;
11018
11019 .align 8
11020@@ -324,6 +327,7 @@ __twofish_dec_blk8:
11021 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
11022 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
11023
11024+ pax_force_retaddr 0, 1
11025 ret;
11026
11027 .align 8
11028@@ -345,6 +349,7 @@ twofish_ecb_enc_8way:
11029
11030 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
11031
11032+ pax_force_retaddr 0, 1
11033 ret;
11034
11035 .align 8
11036@@ -366,6 +371,7 @@ twofish_ecb_dec_8way:
11037
11038 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11039
11040+ pax_force_retaddr 0, 1
11041 ret;
11042
11043 .align 8
11044@@ -392,6 +398,7 @@ twofish_cbc_dec_8way:
11045
11046 popq %r12;
11047
11048+ pax_force_retaddr 0, 1
11049 ret;
11050
11051 .align 8
11052@@ -420,4 +427,5 @@ twofish_ctr_8way:
11053
11054 popq %r12;
11055
11056+ pax_force_retaddr 0, 1
11057 ret;
11058diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11059index 5b012a2..36d5364 100644
11060--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11061+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11062@@ -20,6 +20,8 @@
11063 *
11064 */
11065
11066+#include <asm/alternative-asm.h>
11067+
11068 .file "twofish-x86_64-asm-3way.S"
11069 .text
11070
11071@@ -260,6 +262,7 @@ __twofish_enc_blk_3way:
11072 popq %r13;
11073 popq %r14;
11074 popq %r15;
11075+ pax_force_retaddr 0, 1
11076 ret;
11077
11078 __enc_xor3:
11079@@ -271,6 +274,7 @@ __enc_xor3:
11080 popq %r13;
11081 popq %r14;
11082 popq %r15;
11083+ pax_force_retaddr 0, 1
11084 ret;
11085
11086 .global twofish_dec_blk_3way
11087@@ -312,5 +316,6 @@ twofish_dec_blk_3way:
11088 popq %r13;
11089 popq %r14;
11090 popq %r15;
11091+ pax_force_retaddr 0, 1
11092 ret;
11093
11094diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
11095index 7bcf3fc..f53832f 100644
11096--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
11097+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
11098@@ -21,6 +21,7 @@
11099 .text
11100
11101 #include <asm/asm-offsets.h>
11102+#include <asm/alternative-asm.h>
11103
11104 #define a_offset 0
11105 #define b_offset 4
11106@@ -268,6 +269,7 @@ twofish_enc_blk:
11107
11108 popq R1
11109 movq $1,%rax
11110+ pax_force_retaddr 0, 1
11111 ret
11112
11113 twofish_dec_blk:
11114@@ -319,4 +321,5 @@ twofish_dec_blk:
11115
11116 popq R1
11117 movq $1,%rax
11118+ pax_force_retaddr 0, 1
11119 ret
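The pax_force_retaddr / pax_force_retaddr_bts annotations threaded through all the crypto .S files above are the KERNEXEC return-address hardening: before each ret, bit 63 of the saved return address is set (btsq $63,(%rsp)) or OR-ed in via a mask register, so a hijacked return can only resolve inside the kernel half of the canonical address space. A pure-C model of the masking; the addresses are illustrative, not real layout constants:

#include <stdint.h>
#include <stdio.h>

/* What "btsq $63, rip(%rsp)" does to a saved return address: low-half
 * (userland) code addresses become non-canonical, while kernel-half
 * addresses, whose top bits are already set, pass through unchanged. */
static uint64_t kernexec_mask(uint64_t retaddr)
{
    return retaddr | (1ULL << 63);
}

int main(void)
{
    uint64_t user_text = 0x0000000000401000ULL;   /* illustrative */
    uint64_t kern_text = 0xffffffff81000000ULL;   /* illustrative */

    printf("user %#llx -> %#llx (non-canonical, faults on ret)\n",
           (unsigned long long)user_text,
           (unsigned long long)kernexec_mask(user_text));
    printf("kern %#llx -> %#llx (unchanged)\n",
           (unsigned long long)kern_text,
           (unsigned long long)kernexec_mask(kern_text));
    return 0;
}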
11120diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
11121index a703af1..f5b9c36 100644
11122--- a/arch/x86/ia32/ia32_aout.c
11123+++ b/arch/x86/ia32/ia32_aout.c
11124@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
11125 unsigned long dump_start, dump_size;
11126 struct user32 dump;
11127
11128+ memset(&dump, 0, sizeof(dump));
11129+
11130 fs = get_fs();
11131 set_fs(KERNEL_DS);
11132 has_dumped = 1;
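The memset added to aout_core_dump() closes a kernel-stack infoleak: struct user32 is copied out wholesale later, and any padding or unassigned fields would otherwise carry stale stack bytes. The general pattern, as a userspace sketch with an invented struct:

#include <string.h>
#include <stdio.h>

struct reply {
    unsigned short tag;        /* 2 bytes, then compiler padding */
    unsigned long long value;
};

static void fill_reply(struct reply *r)
{
    /* Zero everything first: the assignments below never touch the
     * padding, and copying the raw struct out would leak it. */
    memset(r, 0, sizeof(*r));
    r->tag = 42;
    r->value = 0xdeadbeef;
}

int main(void)
{
    struct reply r;
    fill_reply(&r);
    /* in the kernel this would be copy_to_user(ubuf, &r, sizeof(r)) */
    printf("sizeof(struct reply) = %zu\n", sizeof(r));
    return 0;
}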
11133diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
11134index a1daf4a..f8c4537 100644
11135--- a/arch/x86/ia32/ia32_signal.c
11136+++ b/arch/x86/ia32/ia32_signal.c
11137@@ -348,7 +348,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
11138 sp -= frame_size;
11139 /* Align the stack pointer according to the i386 ABI,
11140 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
11141- sp = ((sp + 4) & -16ul) - 4;
11142+ sp = ((sp - 12) & -16ul) - 4;
11143 return (void __user *) sp;
11144 }
11145
11146@@ -406,7 +406,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
11147 * These are actually not used anymore, but left because some
11148 * gdb versions depend on them as a marker.
11149 */
11150- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11151+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11152 } put_user_catch(err);
11153
11154 if (err)
11155@@ -448,7 +448,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
11156 0xb8,
11157 __NR_ia32_rt_sigreturn,
11158 0x80cd,
11159- 0,
11160+ 0
11161 };
11162
11163 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
11164@@ -471,16 +471,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
11165
11166 if (ka->sa.sa_flags & SA_RESTORER)
11167 restorer = ka->sa.sa_restorer;
11168+ else if (current->mm->context.vdso)
11169+ /* Return stub is in 32bit vsyscall page */
11170+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
11171 else
11172- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
11173- rt_sigreturn);
11174+ restorer = &frame->retcode;
11175 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
11176
11177 /*
11178 * Not actually used anymore, but left because some gdb
11179 * versions need it.
11180 */
11181- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11182+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11183 } put_user_catch(err);
11184
11185 err |= copy_siginfo_to_user32(&frame->info, info);
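The get_sigframe() rounding change keeps the i386 ABI invariant — on function entry ((sp + 4) & 15) == 0 — while guaranteeing the result lands strictly below sp with slack. Both formulas can be checked mechanically; this is pure arithmetic, no kernel code involved:

#include <stdio.h>

int main(void)
{
    unsigned long sp;

    for (sp = 0xffffd000; sp < 0xffffd020; sp++) {
        unsigned long oldsp = ((sp + 4) & -16ul) - 4;
        unsigned long newsp = ((sp - 12) & -16ul) - 4;

        /* both satisfy the ABI: (result + 4) is 16-byte aligned */
        if (((oldsp + 4) & 15) || ((newsp + 4) & 15))
            printf("ABI violated at sp=%#lx\n", sp);

        /* but only the new formula guarantees room below sp: the old
         * one lands exactly at sp when sp % 16 == 12, the new one
         * always leaves at least 16 bytes (newsp <= sp - 16) */
        if (newsp > sp - 16)
            printf("no slack at sp=%#lx\n", sp);
    }
    printf("checked: alignment holds, newsp <= sp - 16 throughout\n");
    return 0;
}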
11186diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
11187index 142c4ce..19b683f 100644
11188--- a/arch/x86/ia32/ia32entry.S
11189+++ b/arch/x86/ia32/ia32entry.S
11190@@ -15,8 +15,10 @@
11191 #include <asm/irqflags.h>
11192 #include <asm/asm.h>
11193 #include <asm/smap.h>
11194+#include <asm/pgtable.h>
11195 #include <linux/linkage.h>
11196 #include <linux/err.h>
11197+#include <asm/alternative-asm.h>
11198
11199 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11200 #include <linux/elf-em.h>
11201@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
11202 ENDPROC(native_irq_enable_sysexit)
11203 #endif
11204
11205+ .macro pax_enter_kernel_user
11206+ pax_set_fptr_mask
11207+#ifdef CONFIG_PAX_MEMORY_UDEREF
11208+ call pax_enter_kernel_user
11209+#endif
11210+ .endm
11211+
11212+ .macro pax_exit_kernel_user
11213+#ifdef CONFIG_PAX_MEMORY_UDEREF
11214+ call pax_exit_kernel_user
11215+#endif
11216+#ifdef CONFIG_PAX_RANDKSTACK
11217+ pushq %rax
11218+ pushq %r11
11219+ call pax_randomize_kstack
11220+ popq %r11
11221+ popq %rax
11222+#endif
11223+ .endm
11224+
11225+.macro pax_erase_kstack
11226+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11227+ call pax_erase_kstack
11228+#endif
11229+.endm
11230+
11231 /*
11232 * 32bit SYSENTER instruction entry.
11233 *
11234@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
11235 CFI_REGISTER rsp,rbp
11236 SWAPGS_UNSAFE_STACK
11237 movq PER_CPU_VAR(kernel_stack), %rsp
11238- addq $(KERNEL_STACK_OFFSET),%rsp
11239- /*
11240- * No need to follow this irqs on/off section: the syscall
11241- * disabled irqs, here we enable it straight after entry:
11242- */
11243- ENABLE_INTERRUPTS(CLBR_NONE)
11244 movl %ebp,%ebp /* zero extension */
11245 pushq_cfi $__USER32_DS
11246 /*CFI_REL_OFFSET ss,0*/
11247@@ -135,24 +157,44 @@ ENTRY(ia32_sysenter_target)
11248 CFI_REL_OFFSET rsp,0
11249 pushfq_cfi
11250 /*CFI_REL_OFFSET rflags,0*/
11251- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
11252- CFI_REGISTER rip,r10
11253+ orl $X86_EFLAGS_IF,(%rsp)
11254+ GET_THREAD_INFO(%r11)
11255+ movl TI_sysenter_return(%r11), %r11d
11256+ CFI_REGISTER rip,r11
11257 pushq_cfi $__USER32_CS
11258 /*CFI_REL_OFFSET cs,0*/
11259 movl %eax, %eax
11260- pushq_cfi %r10
11261+ pushq_cfi %r11
11262 CFI_REL_OFFSET rip,0
11263 pushq_cfi %rax
11264 cld
11265 SAVE_ARGS 0,1,0
11266+ pax_enter_kernel_user
11267+
11268+#ifdef CONFIG_PAX_RANDKSTACK
11269+ pax_erase_kstack
11270+#endif
11271+
11272+ /*
11273+ * No need to follow this irqs on/off section: the syscall
11274+ * disabled irqs, here we enable it straight after entry:
11275+ */
11276+ ENABLE_INTERRUPTS(CLBR_NONE)
11277 /* no need to do an access_ok check here because rbp has been
11278 32bit zero extended */
11279+
11280+#ifdef CONFIG_PAX_MEMORY_UDEREF
11281+ mov $PAX_USER_SHADOW_BASE,%r11
11282+ add %r11,%rbp
11283+#endif
11284+
11285 ASM_STAC
11286 1: movl (%rbp),%ebp
11287 _ASM_EXTABLE(1b,ia32_badarg)
11288 ASM_CLAC
11289- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11290- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11291+ GET_THREAD_INFO(%r11)
11292+ orl $TS_COMPAT,TI_status(%r11)
11293+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11294 CFI_REMEMBER_STATE
11295 jnz sysenter_tracesys
11296 cmpq $(IA32_NR_syscalls-1),%rax
11297@@ -162,12 +204,15 @@ sysenter_do_call:
11298 sysenter_dispatch:
11299 call *ia32_sys_call_table(,%rax,8)
11300 movq %rax,RAX-ARGOFFSET(%rsp)
11301+ GET_THREAD_INFO(%r11)
11302 DISABLE_INTERRUPTS(CLBR_NONE)
11303 TRACE_IRQS_OFF
11304- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11305+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11306 jnz sysexit_audit
11307 sysexit_from_sys_call:
11308- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11309+ pax_exit_kernel_user
11310+ pax_erase_kstack
11311+ andl $~TS_COMPAT,TI_status(%r11)
11312 /* clear IF, that popfq doesn't enable interrupts early */
11313 andl $~0x200,EFLAGS-R11(%rsp)
11314 movl RIP-R11(%rsp),%edx /* User %eip */
11315@@ -193,6 +238,9 @@ sysexit_from_sys_call:
11316 movl %eax,%esi /* 2nd arg: syscall number */
11317 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
11318 call __audit_syscall_entry
11319+
11320+ pax_erase_kstack
11321+
11322 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
11323 cmpq $(IA32_NR_syscalls-1),%rax
11324 ja ia32_badsys
11325@@ -204,7 +252,7 @@ sysexit_from_sys_call:
11326 .endm
11327
11328 .macro auditsys_exit exit
11329- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11330+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11331 jnz ia32_ret_from_sys_call
11332 TRACE_IRQS_ON
11333 ENABLE_INTERRUPTS(CLBR_NONE)
11334@@ -215,11 +263,12 @@ sysexit_from_sys_call:
11335 1: setbe %al /* 1 if error, 0 if not */
11336 movzbl %al,%edi /* zero-extend that into %edi */
11337 call __audit_syscall_exit
11338+ GET_THREAD_INFO(%r11)
11339 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
11340 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
11341 DISABLE_INTERRUPTS(CLBR_NONE)
11342 TRACE_IRQS_OFF
11343- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11344+ testl %edi,TI_flags(%r11)
11345 jz \exit
11346 CLEAR_RREGS -ARGOFFSET
11347 jmp int_with_check
11348@@ -237,7 +286,7 @@ sysexit_audit:
11349
11350 sysenter_tracesys:
11351 #ifdef CONFIG_AUDITSYSCALL
11352- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11353+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11354 jz sysenter_auditsys
11355 #endif
11356 SAVE_REST
11357@@ -249,6 +298,9 @@ sysenter_tracesys:
11358 RESTORE_REST
11359 cmpq $(IA32_NR_syscalls-1),%rax
11360 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
11361+
11362+ pax_erase_kstack
11363+
11364 jmp sysenter_do_call
11365 CFI_ENDPROC
11366 ENDPROC(ia32_sysenter_target)
11367@@ -276,19 +328,25 @@ ENDPROC(ia32_sysenter_target)
11368 ENTRY(ia32_cstar_target)
11369 CFI_STARTPROC32 simple
11370 CFI_SIGNAL_FRAME
11371- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
11372+ CFI_DEF_CFA rsp,0
11373 CFI_REGISTER rip,rcx
11374 /*CFI_REGISTER rflags,r11*/
11375 SWAPGS_UNSAFE_STACK
11376 movl %esp,%r8d
11377 CFI_REGISTER rsp,r8
11378 movq PER_CPU_VAR(kernel_stack),%rsp
11379+ SAVE_ARGS 8*6,0,0
11380+ pax_enter_kernel_user
11381+
11382+#ifdef CONFIG_PAX_RANDKSTACK
11383+ pax_erase_kstack
11384+#endif
11385+
11386 /*
11387 * No need to follow this irqs on/off section: the syscall
11388 * disabled irqs and here we enable it straight after entry:
11389 */
11390 ENABLE_INTERRUPTS(CLBR_NONE)
11391- SAVE_ARGS 8,0,0
11392 movl %eax,%eax /* zero extension */
11393 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
11394 movq %rcx,RIP-ARGOFFSET(%rsp)
11395@@ -304,12 +362,19 @@ ENTRY(ia32_cstar_target)
11396 /* no need to do an access_ok check here because r8 has been
11397 32bit zero extended */
11398 /* hardware stack frame is complete now */
11399+
11400+#ifdef CONFIG_PAX_MEMORY_UDEREF
11401+ mov $PAX_USER_SHADOW_BASE,%r11
11402+ add %r11,%r8
11403+#endif
11404+
11405 ASM_STAC
11406 1: movl (%r8),%r9d
11407 _ASM_EXTABLE(1b,ia32_badarg)
11408 ASM_CLAC
11409- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11410- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11411+ GET_THREAD_INFO(%r11)
11412+ orl $TS_COMPAT,TI_status(%r11)
11413+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11414 CFI_REMEMBER_STATE
11415 jnz cstar_tracesys
11416 cmpq $IA32_NR_syscalls-1,%rax
11417@@ -319,12 +384,15 @@ cstar_do_call:
11418 cstar_dispatch:
11419 call *ia32_sys_call_table(,%rax,8)
11420 movq %rax,RAX-ARGOFFSET(%rsp)
11421+ GET_THREAD_INFO(%r11)
11422 DISABLE_INTERRUPTS(CLBR_NONE)
11423 TRACE_IRQS_OFF
11424- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11425+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11426 jnz sysretl_audit
11427 sysretl_from_sys_call:
11428- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11429+ pax_exit_kernel_user
11430+ pax_erase_kstack
11431+ andl $~TS_COMPAT,TI_status(%r11)
11432 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
11433 movl RIP-ARGOFFSET(%rsp),%ecx
11434 CFI_REGISTER rip,rcx
11435@@ -352,7 +420,7 @@ sysretl_audit:
11436
11437 cstar_tracesys:
11438 #ifdef CONFIG_AUDITSYSCALL
11439- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11440+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11441 jz cstar_auditsys
11442 #endif
11443 xchgl %r9d,%ebp
11444@@ -366,6 +434,9 @@ cstar_tracesys:
11445 xchgl %ebp,%r9d
11446 cmpq $(IA32_NR_syscalls-1),%rax
11447 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
11448+
11449+ pax_erase_kstack
11450+
11451 jmp cstar_do_call
11452 END(ia32_cstar_target)
11453
11454@@ -407,19 +478,26 @@ ENTRY(ia32_syscall)
11455 CFI_REL_OFFSET rip,RIP-RIP
11456 PARAVIRT_ADJUST_EXCEPTION_FRAME
11457 SWAPGS
11458- /*
11459- * No need to follow this irqs on/off section: the syscall
11460- * disabled irqs and here we enable it straight after entry:
11461- */
11462- ENABLE_INTERRUPTS(CLBR_NONE)
11463 movl %eax,%eax
11464 pushq_cfi %rax
11465 cld
11466 /* note the registers are not zero extended to the sf.
11467 this could be a problem. */
11468 SAVE_ARGS 0,1,0
11469- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11470- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11471+ pax_enter_kernel_user
11472+
11473+#ifdef CONFIG_PAX_RANDKSTACK
11474+ pax_erase_kstack
11475+#endif
11476+
11477+ /*
11478+ * No need to follow this irqs on/off section: the syscall
11479+ * disabled irqs and here we enable it straight after entry:
11480+ */
11481+ ENABLE_INTERRUPTS(CLBR_NONE)
11482+ GET_THREAD_INFO(%r11)
11483+ orl $TS_COMPAT,TI_status(%r11)
11484+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11485 jnz ia32_tracesys
11486 cmpq $(IA32_NR_syscalls-1),%rax
11487 ja ia32_badsys
11488@@ -442,6 +520,9 @@ ia32_tracesys:
11489 RESTORE_REST
11490 cmpq $(IA32_NR_syscalls-1),%rax
11491 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
11492+
11493+ pax_erase_kstack
11494+
11495 jmp ia32_do_call
11496 END(ia32_syscall)
11497
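The ia32 entry rework above wires in the UDEREF machinery: kernel/user transitions call pax_enter_kernel_user / pax_exit_kernel_user, and user pointers taken from registers get PAX_USER_SHADOW_BASE added before the kernel loads through them, so the access goes via a separate shadow mapping of userland. A toy model of that pointer fixup; the base value is an assumption for illustration, not the real constant:

#include <stdint.h>
#include <stdio.h>

/* assumption: a fixed offset at which userland is shadow-mapped */
#define DEMO_USER_SHADOW_BASE 0xffff880000000000ULL

/* mirrors "mov $PAX_USER_SHADOW_BASE,%r11; add %r11,%rbp" in the
 * sysenter path: the 32-bit, zero-extended user pointer is rebased
 * into the shadow area before the kernel dereferences it */
static uint64_t uderef_rebase(uint32_t user_ptr)
{
    return (uint64_t)user_ptr + DEMO_USER_SHADOW_BASE;
}

int main(void)
{
    uint32_t ebp = 0xbffff000;   /* illustrative user stack pointer */
    printf("%#x -> %#llx\n", ebp,
           (unsigned long long)uderef_rebase(ebp));
    return 0;
}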
11498diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
11499index d0b689b..34be51d 100644
11500--- a/arch/x86/ia32/sys_ia32.c
11501+++ b/arch/x86/ia32/sys_ia32.c
11502@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
11503 */
11504 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
11505 {
11506- typeof(ubuf->st_uid) uid = 0;
11507- typeof(ubuf->st_gid) gid = 0;
11508+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
11509+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
11510 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
11511 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
11512 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
11513@@ -303,7 +303,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
11514 mm_segment_t old_fs = get_fs();
11515
11516 set_fs(KERNEL_DS);
11517- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
11518+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
11519 set_fs(old_fs);
11520 if (put_compat_timespec(&t, interval))
11521 return -EFAULT;
11522@@ -319,7 +319,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
11523 mm_segment_t old_fs = get_fs();
11524
11525 set_fs(KERNEL_DS);
11526- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
11527+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
11528 set_fs(old_fs);
11529 if (!ret) {
11530 switch (_NSIG_WORDS) {
11531@@ -344,7 +344,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
11532 if (copy_siginfo_from_user32(&info, uinfo))
11533 return -EFAULT;
11534 set_fs(KERNEL_DS);
11535- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
11536+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
11537 set_fs(old_fs);
11538 return ret;
11539 }
11540@@ -376,7 +376,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
11541 return -EFAULT;
11542
11543 set_fs(KERNEL_DS);
11544- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
11545+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
11546 count);
11547 set_fs(old_fs);
11548
11549diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
11550index 372231c..a5aa1a1 100644
11551--- a/arch/x86/include/asm/alternative-asm.h
11552+++ b/arch/x86/include/asm/alternative-asm.h
11553@@ -18,6 +18,45 @@
11554 .endm
11555 #endif
11556
11557+#ifdef KERNEXEC_PLUGIN
11558+ .macro pax_force_retaddr_bts rip=0
11559+ btsq $63,\rip(%rsp)
11560+ .endm
11561+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11562+ .macro pax_force_retaddr rip=0, reload=0
11563+ btsq $63,\rip(%rsp)
11564+ .endm
11565+ .macro pax_force_fptr ptr
11566+ btsq $63,\ptr
11567+ .endm
11568+ .macro pax_set_fptr_mask
11569+ .endm
11570+#endif
11571+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
11572+ .macro pax_force_retaddr rip=0, reload=0
11573+ .if \reload
11574+ pax_set_fptr_mask
11575+ .endif
11576+ orq %r10,\rip(%rsp)
11577+ .endm
11578+ .macro pax_force_fptr ptr
11579+ orq %r10,\ptr
11580+ .endm
11581+ .macro pax_set_fptr_mask
11582+ movabs $0x8000000000000000,%r10
11583+ .endm
11584+#endif
11585+#else
11586+ .macro pax_force_retaddr rip=0, reload=0
11587+ .endm
11588+ .macro pax_force_fptr ptr
11589+ .endm
11590+ .macro pax_force_retaddr_bts rip=0
11591+ .endm
11592+ .macro pax_set_fptr_mask
11593+ .endm
11594+#endif
11595+
11596 .macro altinstruction_entry orig alt feature orig_len alt_len
11597 .long \orig - .
11598 .long \alt - .
11599diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
11600index 58ed6d9..f1cbe58 100644
11601--- a/arch/x86/include/asm/alternative.h
11602+++ b/arch/x86/include/asm/alternative.h
11603@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
11604 ".pushsection .discard,\"aw\",@progbits\n" \
11605 DISCARD_ENTRY(1) \
11606 ".popsection\n" \
11607- ".pushsection .altinstr_replacement, \"ax\"\n" \
11608+ ".pushsection .altinstr_replacement, \"a\"\n" \
11609 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
11610 ".popsection"
11611
11612@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
11613 DISCARD_ENTRY(1) \
11614 DISCARD_ENTRY(2) \
11615 ".popsection\n" \
11616- ".pushsection .altinstr_replacement, \"ax\"\n" \
11617+ ".pushsection .altinstr_replacement, \"a\"\n" \
11618 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
11619 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
11620 ".popsection"
11621diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
11622index 3388034..050f0b9 100644
11623--- a/arch/x86/include/asm/apic.h
11624+++ b/arch/x86/include/asm/apic.h
11625@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
11626
11627 #ifdef CONFIG_X86_LOCAL_APIC
11628
11629-extern unsigned int apic_verbosity;
11630+extern int apic_verbosity;
11631 extern int local_apic_timer_c2_ok;
11632
11633 extern int disable_apic;
11634diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
11635index 20370c6..a2eb9b0 100644
11636--- a/arch/x86/include/asm/apm.h
11637+++ b/arch/x86/include/asm/apm.h
11638@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
11639 __asm__ __volatile__(APM_DO_ZERO_SEGS
11640 "pushl %%edi\n\t"
11641 "pushl %%ebp\n\t"
11642- "lcall *%%cs:apm_bios_entry\n\t"
11643+ "lcall *%%ss:apm_bios_entry\n\t"
11644 "setc %%al\n\t"
11645 "popl %%ebp\n\t"
11646 "popl %%edi\n\t"
11647@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
11648 __asm__ __volatile__(APM_DO_ZERO_SEGS
11649 "pushl %%edi\n\t"
11650 "pushl %%ebp\n\t"
11651- "lcall *%%cs:apm_bios_entry\n\t"
11652+ "lcall *%%ss:apm_bios_entry\n\t"
11653 "setc %%bl\n\t"
11654 "popl %%ebp\n\t"
11655 "popl %%edi\n\t"
11656diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
11657index 722aa3b..3a0bb27 100644
11658--- a/arch/x86/include/asm/atomic.h
11659+++ b/arch/x86/include/asm/atomic.h
11660@@ -22,7 +22,18 @@
11661 */
11662 static inline int atomic_read(const atomic_t *v)
11663 {
11664- return (*(volatile int *)&(v)->counter);
11665+ return (*(volatile const int *)&(v)->counter);
11666+}
11667+
11668+/**
11669+ * atomic_read_unchecked - read atomic variable
11670+ * @v: pointer of type atomic_unchecked_t
11671+ *
11672+ * Atomically reads the value of @v.
11673+ */
11674+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
11675+{
11676+ return (*(volatile const int *)&(v)->counter);
11677 }
11678
11679 /**
11680@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
11681 }
11682
11683 /**
11684+ * atomic_set_unchecked - set atomic variable
11685+ * @v: pointer of type atomic_unchecked_t
11686+ * @i: required value
11687+ *
11688+ * Atomically sets the value of @v to @i.
11689+ */
11690+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
11691+{
11692+ v->counter = i;
11693+}
11694+
11695+/**
11696 * atomic_add - add integer to atomic variable
11697 * @i: integer value to add
11698 * @v: pointer of type atomic_t
11699@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
11700 */
11701 static inline void atomic_add(int i, atomic_t *v)
11702 {
11703- asm volatile(LOCK_PREFIX "addl %1,%0"
11704+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
11705+
11706+#ifdef CONFIG_PAX_REFCOUNT
11707+ "jno 0f\n"
11708+ LOCK_PREFIX "subl %1,%0\n"
11709+ "int $4\n0:\n"
11710+ _ASM_EXTABLE(0b, 0b)
11711+#endif
11712+
11713+ : "+m" (v->counter)
11714+ : "ir" (i));
11715+}
11716+
11717+/**
11718+ * atomic_add_unchecked - add integer to atomic variable
11719+ * @i: integer value to add
11720+ * @v: pointer of type atomic_unchecked_t
11721+ *
11722+ * Atomically adds @i to @v.
11723+ */
11724+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
11725+{
11726+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
11727 : "+m" (v->counter)
11728 : "ir" (i));
11729 }
11730@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
11731 */
11732 static inline void atomic_sub(int i, atomic_t *v)
11733 {
11734- asm volatile(LOCK_PREFIX "subl %1,%0"
11735+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
11736+
11737+#ifdef CONFIG_PAX_REFCOUNT
11738+ "jno 0f\n"
11739+ LOCK_PREFIX "addl %1,%0\n"
11740+ "int $4\n0:\n"
11741+ _ASM_EXTABLE(0b, 0b)
11742+#endif
11743+
11744+ : "+m" (v->counter)
11745+ : "ir" (i));
11746+}
11747+
11748+/**
11749+ * atomic_sub_unchecked - subtract integer from atomic variable
11750+ * @i: integer value to subtract
11751+ * @v: pointer of type atomic_unchecked_t
11752+ *
11753+ * Atomically subtracts @i from @v.
11754+ */
11755+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
11756+{
11757+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
11758 : "+m" (v->counter)
11759 : "ir" (i));
11760 }
11761@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
11762 {
11763 unsigned char c;
11764
11765- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
11766+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
11767+
11768+#ifdef CONFIG_PAX_REFCOUNT
11769+ "jno 0f\n"
11770+ LOCK_PREFIX "addl %2,%0\n"
11771+ "int $4\n0:\n"
11772+ _ASM_EXTABLE(0b, 0b)
11773+#endif
11774+
11775+ "sete %1\n"
11776 : "+m" (v->counter), "=qm" (c)
11777 : "ir" (i) : "memory");
11778 return c;
11779@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
11780 */
11781 static inline void atomic_inc(atomic_t *v)
11782 {
11783- asm volatile(LOCK_PREFIX "incl %0"
11784+ asm volatile(LOCK_PREFIX "incl %0\n"
11785+
11786+#ifdef CONFIG_PAX_REFCOUNT
11787+ "jno 0f\n"
11788+ LOCK_PREFIX "decl %0\n"
11789+ "int $4\n0:\n"
11790+ _ASM_EXTABLE(0b, 0b)
11791+#endif
11792+
11793+ : "+m" (v->counter));
11794+}
11795+
11796+/**
11797+ * atomic_inc_unchecked - increment atomic variable
11798+ * @v: pointer of type atomic_unchecked_t
11799+ *
11800+ * Atomically increments @v by 1.
11801+ */
11802+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
11803+{
11804+ asm volatile(LOCK_PREFIX "incl %0\n"
11805 : "+m" (v->counter));
11806 }
11807
11808@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
11809 */
11810 static inline void atomic_dec(atomic_t *v)
11811 {
11812- asm volatile(LOCK_PREFIX "decl %0"
11813+ asm volatile(LOCK_PREFIX "decl %0\n"
11814+
11815+#ifdef CONFIG_PAX_REFCOUNT
11816+ "jno 0f\n"
11817+ LOCK_PREFIX "incl %0\n"
11818+ "int $4\n0:\n"
11819+ _ASM_EXTABLE(0b, 0b)
11820+#endif
11821+
11822+ : "+m" (v->counter));
11823+}
11824+
11825+/**
11826+ * atomic_dec_unchecked - decrement atomic variable
11827+ * @v: pointer of type atomic_unchecked_t
11828+ *
11829+ * Atomically decrements @v by 1.
11830+ */
11831+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
11832+{
11833+ asm volatile(LOCK_PREFIX "decl %0\n"
11834 : "+m" (v->counter));
11835 }
11836
11837@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
11838 {
11839 unsigned char c;
11840
11841- asm volatile(LOCK_PREFIX "decl %0; sete %1"
11842+ asm volatile(LOCK_PREFIX "decl %0\n"
11843+
11844+#ifdef CONFIG_PAX_REFCOUNT
11845+ "jno 0f\n"
11846+ LOCK_PREFIX "incl %0\n"
11847+ "int $4\n0:\n"
11848+ _ASM_EXTABLE(0b, 0b)
11849+#endif
11850+
11851+ "sete %1\n"
11852 : "+m" (v->counter), "=qm" (c)
11853 : : "memory");
11854 return c != 0;
11855@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
11856 {
11857 unsigned char c;
11858
11859- asm volatile(LOCK_PREFIX "incl %0; sete %1"
11860+ asm volatile(LOCK_PREFIX "incl %0\n"
11861+
11862+#ifdef CONFIG_PAX_REFCOUNT
11863+ "jno 0f\n"
11864+ LOCK_PREFIX "decl %0\n"
11865+ "int $4\n0:\n"
11866+ _ASM_EXTABLE(0b, 0b)
11867+#endif
11868+
11869+ "sete %1\n"
11870+ : "+m" (v->counter), "=qm" (c)
11871+ : : "memory");
11872+ return c != 0;
11873+}
11874+
11875+/**
11876+ * atomic_inc_and_test_unchecked - increment and test
11877+ * @v: pointer of type atomic_unchecked_t
11878+ *
11879+ * Atomically increments @v by 1
11880+ * and returns true if the result is zero, or false for all
11881+ * other cases.
11882+ */
11883+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
11884+{
11885+ unsigned char c;
11886+
11887+ asm volatile(LOCK_PREFIX "incl %0\n"
11888+ "sete %1\n"
11889 : "+m" (v->counter), "=qm" (c)
11890 : : "memory");
11891 return c != 0;
11892@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
11893 {
11894 unsigned char c;
11895
11896- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
11897+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
11898+
11899+#ifdef CONFIG_PAX_REFCOUNT
11900+ "jno 0f\n"
11901+ LOCK_PREFIX "subl %2,%0\n"
11902+ "int $4\n0:\n"
11903+ _ASM_EXTABLE(0b, 0b)
11904+#endif
11905+
11906+ "sets %1\n"
11907 : "+m" (v->counter), "=qm" (c)
11908 : "ir" (i) : "memory");
11909 return c;
11910@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
11911 */
11912 static inline int atomic_add_return(int i, atomic_t *v)
11913 {
11914+ return i + xadd_check_overflow(&v->counter, i);
11915+}
11916+
11917+/**
11918+ * atomic_add_return_unchecked - add integer and return
11919+ * @i: integer value to add
11920+ * @v: pointer of type atomic_unchecked_t
11921+ *
11922+ * Atomically adds @i to @v and returns @i + @v
11923+ */
11924+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
11925+{
11926 return i + xadd(&v->counter, i);
11927 }
11928
11929@@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
11930 }
11931
11932 #define atomic_inc_return(v) (atomic_add_return(1, v))
11933+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
11934+{
11935+ return atomic_add_return_unchecked(1, v);
11936+}
11937 #define atomic_dec_return(v) (atomic_sub_return(1, v))
11938
11939 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
11940@@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
11941 return cmpxchg(&v->counter, old, new);
11942 }
11943
11944+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
11945+{
11946+ return cmpxchg(&v->counter, old, new);
11947+}
11948+
11949 static inline int atomic_xchg(atomic_t *v, int new)
11950 {
11951 return xchg(&v->counter, new);
11952 }
11953
11954+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
11955+{
11956+ return xchg(&v->counter, new);
11957+}
11958+
11959 /**
11960 * __atomic_add_unless - add unless the number is already a given value
11961 * @v: pointer of type atomic_t
11962@@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
11963 */
11964 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
11965 {
11966- int c, old;
11967+ int c, old, new;
11968 c = atomic_read(v);
11969 for (;;) {
11970- if (unlikely(c == (u)))
11971+ if (unlikely(c == u))
11972 break;
11973- old = atomic_cmpxchg((v), c, c + (a));
11974+
11975+ asm volatile("addl %2,%0\n"
11976+
11977+#ifdef CONFIG_PAX_REFCOUNT
11978+ "jno 0f\n"
11979+ "subl %2,%0\n"
11980+ "int $4\n0:\n"
11981+ _ASM_EXTABLE(0b, 0b)
11982+#endif
11983+
11984+ : "=r" (new)
11985+ : "0" (c), "ir" (a));
11986+
11987+ old = atomic_cmpxchg(v, c, new);
11988 if (likely(old == c))
11989 break;
11990 c = old;
11991@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
11992 }
11993
11994 /**
11995+ * atomic_inc_not_zero_hint - increment if not null
11996+ * @v: pointer of type atomic_t
11997+ * @hint: probable value of the atomic before the increment
11998+ *
11999+ * This version of atomic_inc_not_zero() gives a hint of the probable
12000+ * value of the atomic. This helps the processor avoid reading the memory
12001+ * before doing the atomic read/modify/write cycle, lowering the
12002+ * number of bus transactions on some arches.
12003+ *
12004+ * Returns: 0 if increment was not done, 1 otherwise.
12005+ */
12006+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
12007+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
12008+{
12009+ int val, c = hint, new;
12010+
12011+ /* sanity test, should be removed by compiler if hint is a constant */
12012+ if (!hint)
12013+ return __atomic_add_unless(v, 1, 0);
12014+
12015+ do {
12016+ asm volatile("incl %0\n"
12017+
12018+#ifdef CONFIG_PAX_REFCOUNT
12019+ "jno 0f\n"
12020+ "decl %0\n"
12021+ "int $4\n0:\n"
12022+ _ASM_EXTABLE(0b, 0b)
12023+#endif
12024+
12025+ : "=r" (new)
12026+ : "0" (c));
12027+
12028+ val = atomic_cmpxchg(v, c, new);
12029+ if (val == c)
12030+ return 1;
12031+ c = val;
12032+ } while (c);
12033+
12034+ return 0;
12035+}
12036+
12037+/**
12038 * atomic_inc_short - increment of a short integer
12039 * @v: pointer to type int
12040 *
12041@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
12042 #endif
12043
12044 /* These are x86-specific, used by some header files */
12045-#define atomic_clear_mask(mask, addr) \
12046- asm volatile(LOCK_PREFIX "andl %0,%1" \
12047- : : "r" (~(mask)), "m" (*(addr)) : "memory")
12048+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
12049+{
12050+ asm volatile(LOCK_PREFIX "andl %1,%0"
12051+ : "+m" (v->counter)
12052+ : "r" (~(mask))
12053+ : "memory");
12054+}
12055
12056-#define atomic_set_mask(mask, addr) \
12057- asm volatile(LOCK_PREFIX "orl %0,%1" \
12058- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
12059- : "memory")
12060+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12061+{
12062+ asm volatile(LOCK_PREFIX "andl %1,%0"
12063+ : "+m" (v->counter)
12064+ : "r" (~(mask))
12065+ : "memory");
12066+}
12067+
12068+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
12069+{
12070+ asm volatile(LOCK_PREFIX "orl %1,%0"
12071+ : "+m" (v->counter)
12072+ : "r" (mask)
12073+ : "memory");
12074+}
12075+
12076+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12077+{
12078+ asm volatile(LOCK_PREFIX "orl %1,%0"
12079+ : "+m" (v->counter)
12080+ : "r" (mask)
12081+ : "memory");
12082+}
12083
12084 /* Atomic operations are already serializing on x86 */
12085 #define smp_mb__before_atomic_dec() barrier()
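
For orientation, since the pattern repeats through the rest of this hunk: every checked arithmetic op above is followed by "jno 0f" (skip ahead when there was no signed overflow); on overflow the op is undone and "int $4" raises the overflow trap, with the _ASM_EXTABLE(0b, 0b) entry letting the handler resume at label 0. The *_unchecked variants deliberately keep the old wrapping behaviour. A minimal user-space analogue of the checked path, using a GCC/Clang builtin instead of inline asm (names and code are illustrative, not part of the patch):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int counter; } refcount_sketch_t;

    /* Checked increment: detect signed overflow and refuse to wrap,
     * mirroring the patch's "jno 0f; undo; int $4" sequence. Not
     * atomic; the kernel does this inside a single LOCK'd insn. */
    static void refcount_sketch_inc(refcount_sketch_t *r)
    {
        int new_val;
        if (__builtin_add_overflow(r->counter, 1, &new_val)) {
            fprintf(stderr, "refcount overflow caught\n");
            abort();   /* kernel analogue: int $4, the overflow trap */
        }
        r->counter = new_val;
    }

    int main(void)
    {
        refcount_sketch_t r = { INT_MAX };  /* next increment would wrap */
        refcount_sketch_inc(&r);            /* aborts instead of wrapping */
        return 0;
    }

The undo before the trap matters in the kernel case: other CPUs can observe the counter between the overflowing instruction and the handler, so restoring the pre-overflow value keeps the exposure window narrow.
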
12086diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
12087index b154de7..aadebd8 100644
12088--- a/arch/x86/include/asm/atomic64_32.h
12089+++ b/arch/x86/include/asm/atomic64_32.h
12090@@ -12,6 +12,14 @@ typedef struct {
12091 u64 __aligned(8) counter;
12092 } atomic64_t;
12093
12094+#ifdef CONFIG_PAX_REFCOUNT
12095+typedef struct {
12096+ u64 __aligned(8) counter;
12097+} atomic64_unchecked_t;
12098+#else
12099+typedef atomic64_t atomic64_unchecked_t;
12100+#endif
12101+
12102 #define ATOMIC64_INIT(val) { (val) }
12103
12104 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
12105@@ -37,21 +45,31 @@ typedef struct {
12106 ATOMIC64_DECL_ONE(sym##_386)
12107
12108 ATOMIC64_DECL_ONE(add_386);
12109+ATOMIC64_DECL_ONE(add_unchecked_386);
12110 ATOMIC64_DECL_ONE(sub_386);
12111+ATOMIC64_DECL_ONE(sub_unchecked_386);
12112 ATOMIC64_DECL_ONE(inc_386);
12113+ATOMIC64_DECL_ONE(inc_unchecked_386);
12114 ATOMIC64_DECL_ONE(dec_386);
12115+ATOMIC64_DECL_ONE(dec_unchecked_386);
12116 #endif
12117
12118 #define alternative_atomic64(f, out, in...) \
12119 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
12120
12121 ATOMIC64_DECL(read);
12122+ATOMIC64_DECL(read_unchecked);
12123 ATOMIC64_DECL(set);
12124+ATOMIC64_DECL(set_unchecked);
12125 ATOMIC64_DECL(xchg);
12126 ATOMIC64_DECL(add_return);
12127+ATOMIC64_DECL(add_return_unchecked);
12128 ATOMIC64_DECL(sub_return);
12129+ATOMIC64_DECL(sub_return_unchecked);
12130 ATOMIC64_DECL(inc_return);
12131+ATOMIC64_DECL(inc_return_unchecked);
12132 ATOMIC64_DECL(dec_return);
12133+ATOMIC64_DECL(dec_return_unchecked);
12134 ATOMIC64_DECL(dec_if_positive);
12135 ATOMIC64_DECL(inc_not_zero);
12136 ATOMIC64_DECL(add_unless);
12137@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
12138 }
12139
12140 /**
12141+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
12142+ * @v: pointer to type atomic64_unchecked_t
12143+ * @o: expected value
12144+ * @n: new value
12145+ *
12146+ * Atomically sets @v to @n if it was equal to @o and returns
12147+ * the old value.
12148+ */
12149+
12150+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
12151+{
12152+ return cmpxchg64(&v->counter, o, n);
12153+}
12154+
12155+/**
12156 * atomic64_xchg - xchg atomic64 variable
12157 * @v: pointer to type atomic64_t
12158 * @n: value to assign
12159@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
12160 }
12161
12162 /**
12163+ * atomic64_set_unchecked - set atomic64 variable
12164+ * @v: pointer to type atomic64_unchecked_t
12165+ * @i: value to assign
12166+ *
12167+ * Atomically sets the value of @v to @i.
12168+ */
12169+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
12170+{
12171+ unsigned high = (unsigned)(i >> 32);
12172+ unsigned low = (unsigned)i;
12173+ alternative_atomic64(set, /* no output */,
12174+ "S" (v), "b" (low), "c" (high)
12175+ : "eax", "edx", "memory");
12176+}
12177+
12178+/**
12179 * atomic64_read - read atomic64 variable
12180 * @v: pointer to type atomic64_t
12181 *
12182@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
12183 }
12184
12185 /**
12186+ * atomic64_read_unchecked - read atomic64 variable
12187+ * @v: pointer to type atomic64_unchecked_t
12188+ *
12189+ * Atomically reads the value of @v and returns it.
12190+ */
12191+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
12192+{
12193+ long long r;
12194+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
12195+ return r;
12196+ }
12197+
12198+/**
12199 * atomic64_add_return - add and return
12200 * @i: integer value to add
12201 * @v: pointer to type atomic64_t
12202@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
12203 return i;
12204 }
12205
12206+/**
12207+ * atomic64_add_return_unchecked - add and return
12208+ * @i: integer value to add
12209+ * @v: pointer to type atomic64_unchecked_t
12210+ *
12211+ * Atomically adds @i to @v and returns @i + *@v
12212+ */
12213+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
12214+{
12215+ alternative_atomic64(add_return_unchecked,
12216+ ASM_OUTPUT2("+A" (i), "+c" (v)),
12217+ ASM_NO_INPUT_CLOBBER("memory"));
12218+ return i;
12219+}
12220+
12221 /*
12222 * Other variants with different arithmetic operators:
12223 */
12224@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
12225 return a;
12226 }
12227
12228+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12229+{
12230+ long long a;
12231+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
12232+ "S" (v) : "memory", "ecx");
12233+ return a;
12234+}
12235+
12236 static inline long long atomic64_dec_return(atomic64_t *v)
12237 {
12238 long long a;
12239@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
12240 }
12241
12242 /**
12243+ * atomic64_add_unchecked - add integer to atomic64 variable
12244+ * @i: integer value to add
12245+ * @v: pointer to type atomic64_unchecked_t
12246+ *
12247+ * Atomically adds @i to @v.
12248+ */
12249+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
12250+{
12251+ __alternative_atomic64(add_unchecked, add_return_unchecked,
12252+ ASM_OUTPUT2("+A" (i), "+c" (v)),
12253+ ASM_NO_INPUT_CLOBBER("memory"));
12254+ return i;
12255+}
12256+
12257+/**
12258 * atomic64_sub - subtract the atomic64 variable
12259 * @i: integer value to subtract
12260 * @v: pointer to type atomic64_t
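
Why both type families exist, in short: reference counts must trap on overflow (a wrapped refcount is a use-after-free primitive), while plain event counters, like the irq_err_count conversion further down in this patch, may wrap harmlessly. A rough user-space rendering of that split with C11 atomics (all names invented for illustration):

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef atomic_int checked_t;    /* stands in for atomic_t           */
    typedef atomic_int unchecked_t;  /* stands in for atomic_unchecked_t */

    static void checked_inc(checked_t *v)
    {
        /* load-then-add is racy; the kernel checks inside one LOCK'd
         * instruction. Good enough to show the policy difference. */
        if (atomic_load(v) == INT_MAX)
            abort();                 /* kernel: int $4 overflow trap */
        atomic_fetch_add(v, 1);
    }

    static void unchecked_inc(unchecked_t *v)
    {
        atomic_fetch_add(v, 1);      /* wraparound deliberately allowed */
    }

    int main(void)
    {
        checked_t refs = 1;          /* object lifetime: must not wrap */
        unchecked_t stats = INT_MAX; /* statistic: wrap is harmless    */
        checked_inc(&refs);
        unchecked_inc(&stats);       /* wraps to INT_MIN, by design    */
        printf("refs=%d stats=%d\n", atomic_load(&refs), atomic_load(&stats));
        return 0;
    }
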
12261diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
12262index 0e1cbfc..5623683 100644
12263--- a/arch/x86/include/asm/atomic64_64.h
12264+++ b/arch/x86/include/asm/atomic64_64.h
12265@@ -18,7 +18,19 @@
12266 */
12267 static inline long atomic64_read(const atomic64_t *v)
12268 {
12269- return (*(volatile long *)&(v)->counter);
12270+ return (*(volatile const long *)&(v)->counter);
12271+}
12272+
12273+/**
12274+ * atomic64_read_unchecked - read atomic64 variable
12275+ * @v: pointer of type atomic64_unchecked_t
12276+ *
12277+ * Atomically reads the value of @v.
12278+ * Doesn't imply a read memory barrier.
12279+ */
12280+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
12281+{
12282+ return (*(volatile const long *)&(v)->counter);
12283 }
12284
12285 /**
12286@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
12287 }
12288
12289 /**
12290+ * atomic64_set_unchecked - set atomic64 variable
12291+ * @v: pointer to type atomic64_unchecked_t
12292+ * @i: required value
12293+ *
12294+ * Atomically sets the value of @v to @i.
12295+ */
12296+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
12297+{
12298+ v->counter = i;
12299+}
12300+
12301+/**
12302 * atomic64_add - add integer to atomic64 variable
12303 * @i: integer value to add
12304 * @v: pointer to type atomic64_t
12305@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
12306 */
12307 static inline void atomic64_add(long i, atomic64_t *v)
12308 {
12309+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
12310+
12311+#ifdef CONFIG_PAX_REFCOUNT
12312+ "jno 0f\n"
12313+ LOCK_PREFIX "subq %1,%0\n"
12314+ "int $4\n0:\n"
12315+ _ASM_EXTABLE(0b, 0b)
12316+#endif
12317+
12318+ : "=m" (v->counter)
12319+ : "er" (i), "m" (v->counter));
12320+}
12321+
12322+/**
12323+ * atomic64_add_unchecked - add integer to atomic64 variable
12324+ * @i: integer value to add
12325+ * @v: pointer to type atomic64_unchecked_t
12326+ *
12327+ * Atomically adds @i to @v.
12328+ */
12329+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
12330+{
12331 asm volatile(LOCK_PREFIX "addq %1,%0"
12332 : "=m" (v->counter)
12333 : "er" (i), "m" (v->counter));
12334@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
12335 */
12336 static inline void atomic64_sub(long i, atomic64_t *v)
12337 {
12338- asm volatile(LOCK_PREFIX "subq %1,%0"
12339+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
12340+
12341+#ifdef CONFIG_PAX_REFCOUNT
12342+ "jno 0f\n"
12343+ LOCK_PREFIX "addq %1,%0\n"
12344+ "int $4\n0:\n"
12345+ _ASM_EXTABLE(0b, 0b)
12346+#endif
12347+
12348+ : "=m" (v->counter)
12349+ : "er" (i), "m" (v->counter));
12350+}
12351+
12352+/**
12353+ * atomic64_sub_unchecked - subtract integer from atomic64 variable
12354+ * @i: integer value to subtract
12355+ * @v: pointer to type atomic64_unchecked_t
12356+ *
12357+ * Atomically subtracts @i from @v.
12358+ */
12359+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
12360+{
12361+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
12362 : "=m" (v->counter)
12363 : "er" (i), "m" (v->counter));
12364 }
12365@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12366 {
12367 unsigned char c;
12368
12369- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
12370+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
12371+
12372+#ifdef CONFIG_PAX_REFCOUNT
12373+ "jno 0f\n"
12374+ LOCK_PREFIX "addq %2,%0\n"
12375+ "int $4\n0:\n"
12376+ _ASM_EXTABLE(0b, 0b)
12377+#endif
12378+
12379+ "sete %1\n"
12380 : "=m" (v->counter), "=qm" (c)
12381 : "er" (i), "m" (v->counter) : "memory");
12382 return c;
12383@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12384 */
12385 static inline void atomic64_inc(atomic64_t *v)
12386 {
12387+ asm volatile(LOCK_PREFIX "incq %0\n"
12388+
12389+#ifdef CONFIG_PAX_REFCOUNT
12390+ "jno 0f\n"
12391+ LOCK_PREFIX "decq %0\n"
12392+ "int $4\n0:\n"
12393+ _ASM_EXTABLE(0b, 0b)
12394+#endif
12395+
12396+ : "=m" (v->counter)
12397+ : "m" (v->counter));
12398+}
12399+
12400+/**
12401+ * atomic64_inc_unchecked - increment atomic64 variable
12402+ * @v: pointer to type atomic64_unchecked_t
12403+ *
12404+ * Atomically increments @v by 1.
12405+ */
12406+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
12407+{
12408 asm volatile(LOCK_PREFIX "incq %0"
12409 : "=m" (v->counter)
12410 : "m" (v->counter));
12411@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
12412 */
12413 static inline void atomic64_dec(atomic64_t *v)
12414 {
12415- asm volatile(LOCK_PREFIX "decq %0"
12416+ asm volatile(LOCK_PREFIX "decq %0\n"
12417+
12418+#ifdef CONFIG_PAX_REFCOUNT
12419+ "jno 0f\n"
12420+ LOCK_PREFIX "incq %0\n"
12421+ "int $4\n0:\n"
12422+ _ASM_EXTABLE(0b, 0b)
12423+#endif
12424+
12425+ : "=m" (v->counter)
12426+ : "m" (v->counter));
12427+}
12428+
12429+/**
12430+ * atomic64_dec_unchecked - decrement atomic64 variable
12431+ * @v: pointer to type atomic64_unchecked_t
12432+ *
12433+ * Atomically decrements @v by 1.
12434+ */
12435+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
12436+{
12437+ asm volatile(LOCK_PREFIX "decq %0\n"
12438 : "=m" (v->counter)
12439 : "m" (v->counter));
12440 }
12441@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
12442 {
12443 unsigned char c;
12444
12445- asm volatile(LOCK_PREFIX "decq %0; sete %1"
12446+ asm volatile(LOCK_PREFIX "decq %0\n"
12447+
12448+#ifdef CONFIG_PAX_REFCOUNT
12449+ "jno 0f\n"
12450+ LOCK_PREFIX "incq %0\n"
12451+ "int $4\n0:\n"
12452+ _ASM_EXTABLE(0b, 0b)
12453+#endif
12454+
12455+ "sete %1\n"
12456 : "=m" (v->counter), "=qm" (c)
12457 : "m" (v->counter) : "memory");
12458 return c != 0;
12459@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
12460 {
12461 unsigned char c;
12462
12463- asm volatile(LOCK_PREFIX "incq %0; sete %1"
12464+ asm volatile(LOCK_PREFIX "incq %0\n"
12465+
12466+#ifdef CONFIG_PAX_REFCOUNT
12467+ "jno 0f\n"
12468+ LOCK_PREFIX "decq %0\n"
12469+ "int $4\n0:\n"
12470+ _ASM_EXTABLE(0b, 0b)
12471+#endif
12472+
12473+ "sete %1\n"
12474 : "=m" (v->counter), "=qm" (c)
12475 : "m" (v->counter) : "memory");
12476 return c != 0;
12477@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
12478 {
12479 unsigned char c;
12480
12481- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
12482+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
12483+
12484+#ifdef CONFIG_PAX_REFCOUNT
12485+ "jno 0f\n"
12486+ LOCK_PREFIX "subq %2,%0\n"
12487+ "int $4\n0:\n"
12488+ _ASM_EXTABLE(0b, 0b)
12489+#endif
12490+
12491+ "sets %1\n"
12492 : "=m" (v->counter), "=qm" (c)
12493 : "er" (i), "m" (v->counter) : "memory");
12494 return c;
12495@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
12496 */
12497 static inline long atomic64_add_return(long i, atomic64_t *v)
12498 {
12499+ return i + xadd_check_overflow(&v->counter, i);
12500+}
12501+
12502+/**
12503+ * atomic64_add_return_unchecked - add and return
12504+ * @i: integer value to add
12505+ * @v: pointer to type atomic64_unchecked_t
12506+ *
12507+ * Atomically adds @i to @v and returns @i + @v
12508+ */
12509+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
12510+{
12511 return i + xadd(&v->counter, i);
12512 }
12513
12514@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
12515 }
12516
12517 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
12518+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12519+{
12520+ return atomic64_add_return_unchecked(1, v);
12521+}
12522 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
12523
12524 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
12525@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
12526 return cmpxchg(&v->counter, old, new);
12527 }
12528
12529+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
12530+{
12531+ return cmpxchg(&v->counter, old, new);
12532+}
12533+
12534 static inline long atomic64_xchg(atomic64_t *v, long new)
12535 {
12536 return xchg(&v->counter, new);
12537@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
12538 */
12539 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
12540 {
12541- long c, old;
12542+ long c, old, new;
12543 c = atomic64_read(v);
12544 for (;;) {
12545- if (unlikely(c == (u)))
12546+ if (unlikely(c == u))
12547 break;
12548- old = atomic64_cmpxchg((v), c, c + (a));
12549+
12550+ asm volatile("add %2,%0\n"
12551+
12552+#ifdef CONFIG_PAX_REFCOUNT
12553+ "jno 0f\n"
12554+ "sub %2,%0\n"
12555+ "int $4\n0:\n"
12556+ _ASM_EXTABLE(0b, 0b)
12557+#endif
12558+
12559+ : "=r" (new)
12560+ : "0" (c), "ir" (a));
12561+
12562+ old = atomic64_cmpxchg(v, c, new);
12563 if (likely(old == c))
12564 break;
12565 c = old;
12566 }
12567- return c != (u);
12568+ return c != u;
12569 }
12570
12571 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12572diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
12573index 6dfd019..0c6699f 100644
12574--- a/arch/x86/include/asm/bitops.h
12575+++ b/arch/x86/include/asm/bitops.h
12576@@ -40,7 +40,7 @@
12577 * a mask operation on a byte.
12578 */
12579 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
12580-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
12581+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
12582 #define CONST_MASK(nr) (1 << ((nr) & 7))
12583
12584 /**
12585diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
12586index 4fa687a..60f2d39 100644
12587--- a/arch/x86/include/asm/boot.h
12588+++ b/arch/x86/include/asm/boot.h
12589@@ -6,10 +6,15 @@
12590 #include <uapi/asm/boot.h>
12591
12592 /* Physical address where kernel should be loaded. */
12593-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
12594+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
12595 + (CONFIG_PHYSICAL_ALIGN - 1)) \
12596 & ~(CONFIG_PHYSICAL_ALIGN - 1))
12597
12598+#ifndef __ASSEMBLY__
12599+extern unsigned char __LOAD_PHYSICAL_ADDR[];
12600+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
12601+#endif
12602+
12603 /* Minimum kernel alignment, as a power of two */
12604 #ifdef CONFIG_X86_64
12605 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
12606diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
12607index 48f99f1..d78ebf9 100644
12608--- a/arch/x86/include/asm/cache.h
12609+++ b/arch/x86/include/asm/cache.h
12610@@ -5,12 +5,13 @@
12611
12612 /* L1 cache line size */
12613 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12614-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12615+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12616
12617 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
12618+#define __read_only __attribute__((__section__(".data..read_only")))
12619
12620 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
12621-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
12622+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
12623
12624 #ifdef CONFIG_X86_VSMP
12625 #ifdef CONFIG_SMP
12626diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
12627index 9863ee3..4a1f8e1 100644
12628--- a/arch/x86/include/asm/cacheflush.h
12629+++ b/arch/x86/include/asm/cacheflush.h
12630@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
12631 unsigned long pg_flags = pg->flags & _PGMT_MASK;
12632
12633 if (pg_flags == _PGMT_DEFAULT)
12634- return -1;
12635+ return ~0UL;
12636 else if (pg_flags == _PGMT_WC)
12637 return _PAGE_CACHE_WC;
12638 else if (pg_flags == _PGMT_UC_MINUS)
12639diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
12640index 46fc474..b02b0f9 100644
12641--- a/arch/x86/include/asm/checksum_32.h
12642+++ b/arch/x86/include/asm/checksum_32.h
12643@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
12644 int len, __wsum sum,
12645 int *src_err_ptr, int *dst_err_ptr);
12646
12647+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
12648+ int len, __wsum sum,
12649+ int *src_err_ptr, int *dst_err_ptr);
12650+
12651+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
12652+ int len, __wsum sum,
12653+ int *src_err_ptr, int *dst_err_ptr);
12654+
12655 /*
12656 * Note: when you get a NULL pointer exception here this means someone
12657 * passed in an incorrect kernel address to one of these functions.
12658@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
12659 int *err_ptr)
12660 {
12661 might_sleep();
12662- return csum_partial_copy_generic((__force void *)src, dst,
12663+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
12664 len, sum, err_ptr, NULL);
12665 }
12666
12667@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
12668 {
12669 might_sleep();
12670 if (access_ok(VERIFY_WRITE, dst, len))
12671- return csum_partial_copy_generic(src, (__force void *)dst,
12672+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
12673 len, sum, NULL, err_ptr);
12674
12675 if (len)
12676diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
12677index 8d871ea..c1a0dc9 100644
12678--- a/arch/x86/include/asm/cmpxchg.h
12679+++ b/arch/x86/include/asm/cmpxchg.h
12680@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
12681 __compiletime_error("Bad argument size for cmpxchg");
12682 extern void __xadd_wrong_size(void)
12683 __compiletime_error("Bad argument size for xadd");
12684+extern void __xadd_check_overflow_wrong_size(void)
12685+ __compiletime_error("Bad argument size for xadd_check_overflow");
12686 extern void __add_wrong_size(void)
12687 __compiletime_error("Bad argument size for add");
12688+extern void __add_check_overflow_wrong_size(void)
12689+ __compiletime_error("Bad argument size for add_check_overflow");
12690
12691 /*
12692 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
12693@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
12694 __ret; \
12695 })
12696
12697+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
12698+ ({ \
12699+ __typeof__ (*(ptr)) __ret = (arg); \
12700+ switch (sizeof(*(ptr))) { \
12701+ case __X86_CASE_L: \
12702+ asm volatile (lock #op "l %0, %1\n" \
12703+ "jno 0f\n" \
12704+ "mov %0,%1\n" \
12705+ "int $4\n0:\n" \
12706+ _ASM_EXTABLE(0b, 0b) \
12707+ : "+r" (__ret), "+m" (*(ptr)) \
12708+ : : "memory", "cc"); \
12709+ break; \
12710+ case __X86_CASE_Q: \
12711+ asm volatile (lock #op "q %q0, %1\n" \
12712+ "jno 0f\n" \
12713+ "mov %0,%1\n" \
12714+ "int $4\n0:\n" \
12715+ _ASM_EXTABLE(0b, 0b) \
12716+ : "+r" (__ret), "+m" (*(ptr)) \
12717+ : : "memory", "cc"); \
12718+ break; \
12719+ default: \
12720+ __ ## op ## _check_overflow_wrong_size(); \
12721+ } \
12722+ __ret; \
12723+ })
12724+
12725 /*
12726 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
12727 * Since this is generally used to protect other memory information, we
12728@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
12729 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
12730 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
12731
12732+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
12733+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
12734+
12735 #define __add(ptr, inc, lock) \
12736 ({ \
12737 __typeof__ (*(ptr)) __ret = (inc); \
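
A note on the `return i + xadd_check_overflow(&v->counter, i)` shape used by atomic_add_return earlier and wired up here: x86 XADD hands back the old value of the memory operand, so the post-add value has to be reconstructed by adding @i again. The same shape with the portable __atomic builtins (sketch only, not from the patch):

    #include <stdio.h>

    /* __atomic_fetch_add returns the pre-add value, exactly like XADD,
     * so add_return-style APIs recompute the new value as i + old. */
    static int add_return_sketch(int i, int *counter)
    {
        return i + __atomic_fetch_add(counter, i, __ATOMIC_SEQ_CST);
    }

    int main(void)
    {
        int v = 40;
        printf("%d\n", add_return_sketch(2, &v));  /* prints 42 */
        printf("%d\n", v);                         /* counter is now 42 */
        return 0;
    }
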
12738diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
12739index 2d9075e..b75a844 100644
12740--- a/arch/x86/include/asm/cpufeature.h
12741+++ b/arch/x86/include/asm/cpufeature.h
12742@@ -206,7 +206,7 @@
12743 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
12744 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
12745 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
12746-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
12747+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
12748 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
12749 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
12750 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
12751@@ -375,7 +375,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
12752 ".section .discard,\"aw\",@progbits\n"
12753 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
12754 ".previous\n"
12755- ".section .altinstr_replacement,\"ax\"\n"
12756+ ".section .altinstr_replacement,\"a\"\n"
12757 "3: movb $1,%0\n"
12758 "4:\n"
12759 ".previous\n"
12760diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
12761index 8bf1c06..f723dfd 100644
12762--- a/arch/x86/include/asm/desc.h
12763+++ b/arch/x86/include/asm/desc.h
12764@@ -4,6 +4,7 @@
12765 #include <asm/desc_defs.h>
12766 #include <asm/ldt.h>
12767 #include <asm/mmu.h>
12768+#include <asm/pgtable.h>
12769
12770 #include <linux/smp.h>
12771 #include <linux/percpu.h>
12772@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
12773
12774 desc->type = (info->read_exec_only ^ 1) << 1;
12775 desc->type |= info->contents << 2;
12776+ desc->type |= info->seg_not_present ^ 1;
12777
12778 desc->s = 1;
12779 desc->dpl = 0x3;
12780@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
12781 }
12782
12783 extern struct desc_ptr idt_descr;
12784-extern gate_desc idt_table[];
12785 extern struct desc_ptr nmi_idt_descr;
12786-extern gate_desc nmi_idt_table[];
12787-
12788-struct gdt_page {
12789- struct desc_struct gdt[GDT_ENTRIES];
12790-} __attribute__((aligned(PAGE_SIZE)));
12791-
12792-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
12793+extern gate_desc idt_table[256];
12794+extern gate_desc nmi_idt_table[256];
12795
12796+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
12797 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
12798 {
12799- return per_cpu(gdt_page, cpu).gdt;
12800+ return cpu_gdt_table[cpu];
12801 }
12802
12803 #ifdef CONFIG_X86_64
12804@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
12805 unsigned long base, unsigned dpl, unsigned flags,
12806 unsigned short seg)
12807 {
12808- gate->a = (seg << 16) | (base & 0xffff);
12809- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
12810+ gate->gate.offset_low = base;
12811+ gate->gate.seg = seg;
12812+ gate->gate.reserved = 0;
12813+ gate->gate.type = type;
12814+ gate->gate.s = 0;
12815+ gate->gate.dpl = dpl;
12816+ gate->gate.p = 1;
12817+ gate->gate.offset_high = base >> 16;
12818 }
12819
12820 #endif
12821@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
12822
12823 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
12824 {
12825+ pax_open_kernel();
12826 memcpy(&idt[entry], gate, sizeof(*gate));
12827+ pax_close_kernel();
12828 }
12829
12830 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
12831 {
12832+ pax_open_kernel();
12833 memcpy(&ldt[entry], desc, 8);
12834+ pax_close_kernel();
12835 }
12836
12837 static inline void
12838@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
12839 default: size = sizeof(*gdt); break;
12840 }
12841
12842+ pax_open_kernel();
12843 memcpy(&gdt[entry], desc, size);
12844+ pax_close_kernel();
12845 }
12846
12847 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
12848@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
12849
12850 static inline void native_load_tr_desc(void)
12851 {
12852+ pax_open_kernel();
12853 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
12854+ pax_close_kernel();
12855 }
12856
12857 static inline void native_load_gdt(const struct desc_ptr *dtr)
12858@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
12859 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
12860 unsigned int i;
12861
12862+ pax_open_kernel();
12863 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
12864 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
12865+ pax_close_kernel();
12866 }
12867
12868 #define _LDT_empty(info) \
12869@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
12870 }
12871
12872 #ifdef CONFIG_X86_64
12873-static inline void set_nmi_gate(int gate, void *addr)
12874+static inline void set_nmi_gate(int gate, const void *addr)
12875 {
12876 gate_desc s;
12877
12878@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
12879 }
12880 #endif
12881
12882-static inline void _set_gate(int gate, unsigned type, void *addr,
12883+static inline void _set_gate(int gate, unsigned type, const void *addr,
12884 unsigned dpl, unsigned ist, unsigned seg)
12885 {
12886 gate_desc s;
12887@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
12888 * Pentium F0 0F bugfix can have resulted in the mapped
12889 * IDT being write-protected.
12890 */
12891-static inline void set_intr_gate(unsigned int n, void *addr)
12892+static inline void set_intr_gate(unsigned int n, const void *addr)
12893 {
12894 BUG_ON((unsigned)n > 0xFF);
12895 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
12896@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
12897 /*
12898 * This routine sets up an interrupt gate at directory privilege level 3.
12899 */
12900-static inline void set_system_intr_gate(unsigned int n, void *addr)
12901+static inline void set_system_intr_gate(unsigned int n, const void *addr)
12902 {
12903 BUG_ON((unsigned)n > 0xFF);
12904 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
12905 }
12906
12907-static inline void set_system_trap_gate(unsigned int n, void *addr)
12908+static inline void set_system_trap_gate(unsigned int n, const void *addr)
12909 {
12910 BUG_ON((unsigned)n > 0xFF);
12911 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
12912 }
12913
12914-static inline void set_trap_gate(unsigned int n, void *addr)
12915+static inline void set_trap_gate(unsigned int n, const void *addr)
12916 {
12917 BUG_ON((unsigned)n > 0xFF);
12918 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
12919@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
12920 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
12921 {
12922 BUG_ON((unsigned)n > 0xFF);
12923- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
12924+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
12925 }
12926
12927-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
12928+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
12929 {
12930 BUG_ON((unsigned)n > 0xFF);
12931 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
12932 }
12933
12934-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
12935+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
12936 {
12937 BUG_ON((unsigned)n > 0xFF);
12938 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
12939 }
12940
12941+#ifdef CONFIG_X86_32
12942+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
12943+{
12944+ struct desc_struct d;
12945+
12946+ if (likely(limit))
12947+ limit = (limit - 1UL) >> PAGE_SHIFT;
12948+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
12949+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
12950+}
12951+#endif
12952+
12953 #endif /* _ASM_X86_DESC_H */
12954diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
12955index 278441f..b95a174 100644
12956--- a/arch/x86/include/asm/desc_defs.h
12957+++ b/arch/x86/include/asm/desc_defs.h
12958@@ -31,6 +31,12 @@ struct desc_struct {
12959 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
12960 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
12961 };
12962+ struct {
12963+ u16 offset_low;
12964+ u16 seg;
12965+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
12966+ unsigned offset_high: 16;
12967+ } gate;
12968 };
12969 } __attribute__((packed));
12970
12971diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
12972index 9c999c1..3860cb8 100644
12973--- a/arch/x86/include/asm/elf.h
12974+++ b/arch/x86/include/asm/elf.h
12975@@ -243,7 +243,25 @@ extern int force_personality32;
12976 the loader. We need to make sure that it is out of the way of the program
12977 that it will "exec", and that there is sufficient room for the brk. */
12978
12979+#ifdef CONFIG_PAX_SEGMEXEC
12980+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
12981+#else
12982 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
12983+#endif
12984+
12985+#ifdef CONFIG_PAX_ASLR
12986+#ifdef CONFIG_X86_32
12987+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
12988+
12989+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
12990+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
12991+#else
12992+#define PAX_ELF_ET_DYN_BASE 0x400000UL
12993+
12994+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
12995+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
12996+#endif
12997+#endif
12998
12999 /* This yields a mask that user programs can use to figure out what
13000 instruction set this CPU supports. This could be done in user space,
13001@@ -296,16 +314,12 @@ do { \
13002
13003 #define ARCH_DLINFO \
13004 do { \
13005- if (vdso_enabled) \
13006- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
13007- (unsigned long)current->mm->context.vdso); \
13008+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
13009 } while (0)
13010
13011 #define ARCH_DLINFO_X32 \
13012 do { \
13013- if (vdso_enabled) \
13014- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
13015- (unsigned long)current->mm->context.vdso); \
13016+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
13017 } while (0)
13018
13019 #define AT_SYSINFO 32
13020@@ -320,7 +334,7 @@ else \
13021
13022 #endif /* !CONFIG_X86_32 */
13023
13024-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
13025+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
13026
13027 #define VDSO_ENTRY \
13028 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
13029@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
13030 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
13031 #define compat_arch_setup_additional_pages syscall32_setup_pages
13032
13033-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
13034-#define arch_randomize_brk arch_randomize_brk
13035-
13036 /*
13037 * True on X86_32 or when emulating IA32 on X86_64
13038 */
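
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN above are counts of randomised bits applied at page granularity. Back-of-the-envelope for the plain i386 case (illustrative arithmetic only):

    #include <stdio.h>

    int main(void)
    {
        unsigned delta_bits = 16;   /* PAX_DELTA_MMAP_LEN, plain i386 */
        unsigned page_shift = 12;   /* 4 KiB pages */

        /* span = 2^bits pages = 2^(bits + shift) bytes */
        unsigned long span = 1UL << (delta_bits + page_shift);
        printf("mmap base shuffled over %lu MiB\n", span >> 20); /* 256 */
        return 0;
    }
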
13039diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
13040index 75ce3f4..882e801 100644
13041--- a/arch/x86/include/asm/emergency-restart.h
13042+++ b/arch/x86/include/asm/emergency-restart.h
13043@@ -13,6 +13,6 @@ enum reboot_type {
13044
13045 extern enum reboot_type reboot_type;
13046
13047-extern void machine_emergency_restart(void);
13048+extern void machine_emergency_restart(void) __noreturn;
13049
13050 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
13051diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
13052index 41ab26e..a88c9e6 100644
13053--- a/arch/x86/include/asm/fpu-internal.h
13054+++ b/arch/x86/include/asm/fpu-internal.h
13055@@ -126,7 +126,9 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
13056 ({ \
13057 int err; \
13058 asm volatile(ASM_STAC "\n" \
13059- "1:" #insn "\n\t" \
13060+ "1:" \
13061+ __copyuser_seg \
13062+ #insn "\n\t" \
13063 "2: " ASM_CLAC "\n" \
13064 ".section .fixup,\"ax\"\n" \
13065 "3: movl $-1,%[err]\n" \
13066@@ -299,7 +301,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
13067 "emms\n\t" /* clear stack tags */
13068 "fildl %P[addr]", /* set F?P to defined value */
13069 X86_FEATURE_FXSAVE_LEAK,
13070- [addr] "m" (tsk->thread.fpu.has_fpu));
13071+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
13072
13073 return fpu_restore_checking(&tsk->thread.fpu);
13074 }
13075diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
13076index be27ba1..8f13ff9 100644
13077--- a/arch/x86/include/asm/futex.h
13078+++ b/arch/x86/include/asm/futex.h
13079@@ -12,6 +12,7 @@
13080 #include <asm/smap.h>
13081
13082 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
13083+ typecheck(u32 __user *, uaddr); \
13084 asm volatile("\t" ASM_STAC "\n" \
13085 "1:\t" insn "\n" \
13086 "2:\t" ASM_CLAC "\n" \
13087@@ -20,15 +21,16 @@
13088 "\tjmp\t2b\n" \
13089 "\t.previous\n" \
13090 _ASM_EXTABLE(1b, 3b) \
13091- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
13092+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
13093 : "i" (-EFAULT), "0" (oparg), "1" (0))
13094
13095 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
13096+ typecheck(u32 __user *, uaddr); \
13097 asm volatile("\t" ASM_STAC "\n" \
13098 "1:\tmovl %2, %0\n" \
13099 "\tmovl\t%0, %3\n" \
13100 "\t" insn "\n" \
13101- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
13102+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
13103 "\tjnz\t1b\n" \
13104 "3:\t" ASM_CLAC "\n" \
13105 "\t.section .fixup,\"ax\"\n" \
13106@@ -38,7 +40,7 @@
13107 _ASM_EXTABLE(1b, 4b) \
13108 _ASM_EXTABLE(2b, 4b) \
13109 : "=&a" (oldval), "=&r" (ret), \
13110- "+m" (*uaddr), "=&r" (tem) \
13111+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
13112 : "r" (oparg), "i" (-EFAULT), "1" (0))
13113
13114 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13115@@ -59,10 +61,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13116
13117 switch (op) {
13118 case FUTEX_OP_SET:
13119- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
13120+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
13121 break;
13122 case FUTEX_OP_ADD:
13123- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
13124+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
13125 uaddr, oparg);
13126 break;
13127 case FUTEX_OP_OR:
13128@@ -116,14 +118,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
13129 return -EFAULT;
13130
13131 asm volatile("\t" ASM_STAC "\n"
13132- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
13133+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
13134 "2:\t" ASM_CLAC "\n"
13135 "\t.section .fixup, \"ax\"\n"
13136 "3:\tmov %3, %0\n"
13137 "\tjmp 2b\n"
13138 "\t.previous\n"
13139 _ASM_EXTABLE(1b, 3b)
13140- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
13141+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
13142 : "i" (-EFAULT), "r" (newval), "1" (oldval)
13143 : "memory"
13144 );
13145diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
13146index eb92a6e..b98b2f4 100644
13147--- a/arch/x86/include/asm/hw_irq.h
13148+++ b/arch/x86/include/asm/hw_irq.h
13149@@ -136,8 +136,8 @@ extern void setup_ioapic_dest(void);
13150 extern void enable_IO_APIC(void);
13151
13152 /* Statistics */
13153-extern atomic_t irq_err_count;
13154-extern atomic_t irq_mis_count;
13155+extern atomic_unchecked_t irq_err_count;
13156+extern atomic_unchecked_t irq_mis_count;
13157
13158 /* EISA */
13159 extern void eisa_set_level_irq(unsigned int irq);
13160diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
13161index a203659..9889f1c 100644
13162--- a/arch/x86/include/asm/i8259.h
13163+++ b/arch/x86/include/asm/i8259.h
13164@@ -62,7 +62,7 @@ struct legacy_pic {
13165 void (*init)(int auto_eoi);
13166 int (*irq_pending)(unsigned int irq);
13167 void (*make_irq)(unsigned int irq);
13168-};
13169+} __do_const;
13170
13171 extern struct legacy_pic *legacy_pic;
13172 extern struct legacy_pic null_legacy_pic;
13173diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
13174index d8e8eef..15b1179 100644
13175--- a/arch/x86/include/asm/io.h
13176+++ b/arch/x86/include/asm/io.h
13177@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
13178 return ioremap_nocache(offset, size);
13179 }
13180
13181-extern void iounmap(volatile void __iomem *addr);
13182+extern void iounmap(const volatile void __iomem *addr);
13183
13184 extern void set_iounmap_nonlazy(void);
13185
13186@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
13187
13188 #include <linux/vmalloc.h>
13189
13190+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
13191+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
13192+{
13193+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13194+}
13195+
13196+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
13197+{
13198+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13199+}
13200+
13201 /*
13202 * Convert a virtual cached pointer to an uncached pointer
13203 */
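
The two helpers just added cap /dev/mem style mappings at the CPU's reported physical address width. A simplified standalone version of the same bound, with the page shift fixed at 12 (the kernel reads boot_cpu_data.x86_phys_bits at run time; the values below are only for illustration):

    #include <stdio.h>

    static int valid_phys_range_sketch(unsigned long long addr,
                                       unsigned long long count,
                                       unsigned phys_bits)
    {
        const unsigned page_shift = 12;        /* 4 KiB pages */
        unsigned long long end_pfn =
            (addr + count + (1ULL << page_shift) - 1) >> page_shift;
        return end_pfn < (1ULL << (phys_bits - page_shift));
    }

    int main(void)
    {
        /* with 36 physical bits, anything reaching 64 GiB is rejected */
        printf("%d\n", valid_phys_range_sketch(0xFFFFFFFFFULL, 1, 36)); /* 0 */
        printf("%d\n", valid_phys_range_sketch(0x1000, 4096, 36));      /* 1 */
        return 0;
    }
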
13204diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
13205index bba3cf8..06bc8da 100644
13206--- a/arch/x86/include/asm/irqflags.h
13207+++ b/arch/x86/include/asm/irqflags.h
13208@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
13209 sti; \
13210 sysexit
13211
13212+#define GET_CR0_INTO_RDI mov %cr0, %rdi
13213+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
13214+#define GET_CR3_INTO_RDI mov %cr3, %rdi
13215+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
13216+
13217 #else
13218 #define INTERRUPT_RETURN iret
13219 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
13220diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
13221index d3ddd17..c9fb0cc 100644
13222--- a/arch/x86/include/asm/kprobes.h
13223+++ b/arch/x86/include/asm/kprobes.h
13224@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
13225 #define RELATIVEJUMP_SIZE 5
13226 #define RELATIVECALL_OPCODE 0xe8
13227 #define RELATIVE_ADDR_SIZE 4
13228-#define MAX_STACK_SIZE 64
13229-#define MIN_STACK_SIZE(ADDR) \
13230- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
13231- THREAD_SIZE - (unsigned long)(ADDR))) \
13232- ? (MAX_STACK_SIZE) \
13233- : (((unsigned long)current_thread_info()) + \
13234- THREAD_SIZE - (unsigned long)(ADDR)))
13235+#define MAX_STACK_SIZE 64UL
13236+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
13237
13238 #define flush_insn_slot(p) do { } while (0)
13239
13240diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
13241index 2d89e39..baee879 100644
13242--- a/arch/x86/include/asm/local.h
13243+++ b/arch/x86/include/asm/local.h
13244@@ -10,33 +10,97 @@ typedef struct {
13245 atomic_long_t a;
13246 } local_t;
13247
13248+typedef struct {
13249+ atomic_long_unchecked_t a;
13250+} local_unchecked_t;
13251+
13252 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
13253
13254 #define local_read(l) atomic_long_read(&(l)->a)
13255+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
13256 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
13257+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
13258
13259 static inline void local_inc(local_t *l)
13260 {
13261- asm volatile(_ASM_INC "%0"
13262+ asm volatile(_ASM_INC "%0\n"
13263+
13264+#ifdef CONFIG_PAX_REFCOUNT
13265+ "jno 0f\n"
13266+ _ASM_DEC "%0\n"
13267+ "int $4\n0:\n"
13268+ _ASM_EXTABLE(0b, 0b)
13269+#endif
13270+
13271+ : "+m" (l->a.counter));
13272+}
13273+
13274+static inline void local_inc_unchecked(local_unchecked_t *l)
13275+{
13276+ asm volatile(_ASM_INC "%0\n"
13277 : "+m" (l->a.counter));
13278 }
13279
13280 static inline void local_dec(local_t *l)
13281 {
13282- asm volatile(_ASM_DEC "%0"
13283+ asm volatile(_ASM_DEC "%0\n"
13284+
13285+#ifdef CONFIG_PAX_REFCOUNT
13286+ "jno 0f\n"
13287+ _ASM_INC "%0\n"
13288+ "int $4\n0:\n"
13289+ _ASM_EXTABLE(0b, 0b)
13290+#endif
13291+
13292+ : "+m" (l->a.counter));
13293+}
13294+
13295+static inline void local_dec_unchecked(local_unchecked_t *l)
13296+{
13297+ asm volatile(_ASM_DEC "%0\n"
13298 : "+m" (l->a.counter));
13299 }
13300
13301 static inline void local_add(long i, local_t *l)
13302 {
13303- asm volatile(_ASM_ADD "%1,%0"
13304+ asm volatile(_ASM_ADD "%1,%0\n"
13305+
13306+#ifdef CONFIG_PAX_REFCOUNT
13307+ "jno 0f\n"
13308+ _ASM_SUB "%1,%0\n"
13309+ "int $4\n0:\n"
13310+ _ASM_EXTABLE(0b, 0b)
13311+#endif
13312+
13313+ : "+m" (l->a.counter)
13314+ : "ir" (i));
13315+}
13316+
13317+static inline void local_add_unchecked(long i, local_unchecked_t *l)
13318+{
13319+ asm volatile(_ASM_ADD "%1,%0\n"
13320 : "+m" (l->a.counter)
13321 : "ir" (i));
13322 }
13323
13324 static inline void local_sub(long i, local_t *l)
13325 {
13326- asm volatile(_ASM_SUB "%1,%0"
13327+ asm volatile(_ASM_SUB "%1,%0\n"
13328+
13329+#ifdef CONFIG_PAX_REFCOUNT
13330+ "jno 0f\n"
13331+ _ASM_ADD "%1,%0\n"
13332+ "int $4\n0:\n"
13333+ _ASM_EXTABLE(0b, 0b)
13334+#endif
13335+
13336+ : "+m" (l->a.counter)
13337+ : "ir" (i));
13338+}
13339+
13340+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
13341+{
13342+ asm volatile(_ASM_SUB "%1,%0\n"
13343 : "+m" (l->a.counter)
13344 : "ir" (i));
13345 }
13346@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
13347 {
13348 unsigned char c;
13349
13350- asm volatile(_ASM_SUB "%2,%0; sete %1"
13351+ asm volatile(_ASM_SUB "%2,%0\n"
13352+
13353+#ifdef CONFIG_PAX_REFCOUNT
13354+ "jno 0f\n"
13355+ _ASM_ADD "%2,%0\n"
13356+ "int $4\n0:\n"
13357+ _ASM_EXTABLE(0b, 0b)
13358+#endif
13359+
13360+ "sete %1\n"
13361 : "+m" (l->a.counter), "=qm" (c)
13362 : "ir" (i) : "memory");
13363 return c;
13364@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
13365 {
13366 unsigned char c;
13367
13368- asm volatile(_ASM_DEC "%0; sete %1"
13369+ asm volatile(_ASM_DEC "%0\n"
13370+
13371+#ifdef CONFIG_PAX_REFCOUNT
13372+ "jno 0f\n"
13373+ _ASM_INC "%0\n"
13374+ "int $4\n0:\n"
13375+ _ASM_EXTABLE(0b, 0b)
13376+#endif
13377+
13378+ "sete %1\n"
13379 : "+m" (l->a.counter), "=qm" (c)
13380 : : "memory");
13381 return c != 0;
13382@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
13383 {
13384 unsigned char c;
13385
13386- asm volatile(_ASM_INC "%0; sete %1"
13387+ asm volatile(_ASM_INC "%0\n"
13388+
13389+#ifdef CONFIG_PAX_REFCOUNT
13390+ "jno 0f\n"
13391+ _ASM_DEC "%0\n"
13392+ "int $4\n0:\n"
13393+ _ASM_EXTABLE(0b, 0b)
13394+#endif
13395+
13396+ "sete %1\n"
13397 : "+m" (l->a.counter), "=qm" (c)
13398 : : "memory");
13399 return c != 0;
13400@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
13401 {
13402 unsigned char c;
13403
13404- asm volatile(_ASM_ADD "%2,%0; sets %1"
13405+ asm volatile(_ASM_ADD "%2,%0\n"
13406+
13407+#ifdef CONFIG_PAX_REFCOUNT
13408+ "jno 0f\n"
13409+ _ASM_SUB "%2,%0\n"
13410+ "int $4\n0:\n"
13411+ _ASM_EXTABLE(0b, 0b)
13412+#endif
13413+
13414+ "sets %1\n"
13415 : "+m" (l->a.counter), "=qm" (c)
13416 : "ir" (i) : "memory");
13417 return c;
13418@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
13419 static inline long local_add_return(long i, local_t *l)
13420 {
13421 long __i = i;
13422+ asm volatile(_ASM_XADD "%0, %1\n"
13423+
13424+#ifdef CONFIG_PAX_REFCOUNT
13425+ "jno 0f\n"
13426+ _ASM_MOV "%0,%1\n"
13427+ "int $4\n0:\n"
13428+ _ASM_EXTABLE(0b, 0b)
13429+#endif
13430+
13431+ : "+r" (i), "+m" (l->a.counter)
13432+ : : "memory");
13433+ return i + __i;
13434+}
13435+
13436+/**
13437+ * local_add_return_unchecked - add and return
13438+ * @i: integer value to add
13439+ * @l: pointer to type local_unchecked_t
13440+ *
13441+ * Atomically adds @i to @l and returns @i + @l
13442+ */
13443+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
13444+{
13445+ long __i = i;
13446 asm volatile(_ASM_XADD "%0, %1;"
13447 : "+r" (i), "+m" (l->a.counter)
13448 : : "memory");
13449@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
13450
13451 #define local_cmpxchg(l, o, n) \
13452 (cmpxchg_local(&((l)->a.counter), (o), (n)))
13453+#define local_cmpxchg_unchecked(l, o, n) \
13454+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
13455 /* Always has a lock prefix */
13456 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
13457
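Every PAX_REFCOUNT hunk above follows the same pattern: perform the arithmetic, jno past a recovery sequence, and on signed overflow undo the operation and execute int $4, which the _ASM_EXTABLE entry turns into a handled overflow exception instead of a silently wrapped counter. Below is a minimal userspace sketch of the detect-and-undo idea, assuming x86 and GCC-style inline asm; the trap is replaced by a flag, and the function name checked_inc is illustrative, not from the patch.

/*
 * Userspace sketch of the PAX_REFCOUNT pattern: increment, jump past
 * the recovery code if no overflow, otherwise undo the increment and
 * report.  The kernel raises int $4 and recovers via the exception
 * table; here a flag stands in for the trap.
 */
#include <stdio.h>

static int checked_inc(int *counter)
{
	int overflowed = 0;

	asm volatile("incl %0\n\t"
		     "jno 1f\n\t"
		     "decl %0\n\t"	/* undo, exactly as the patch does */
		     "movl $1, %1\n"	/* stand-in for int $4 */
		     "1:\n"
		     : "+m" (*counter), "+r" (overflowed)
		     : : "cc");
	return overflowed;
}

int main(void)
{
	int c = 0x7fffffff;	/* INT_MAX: the next increment overflows */

	printf("overflowed=%d, counter still %d\n", checked_inc(&c), c);
	return 0;
}

The _unchecked variants added alongside each operation keep the plain, trap-free behaviour for counters where wraparound is intentional.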
13458diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
13459new file mode 100644
13460index 0000000..2bfd3ba
13461--- /dev/null
13462+++ b/arch/x86/include/asm/mman.h
13463@@ -0,0 +1,15 @@
13464+#ifndef _X86_MMAN_H
13465+#define _X86_MMAN_H
13466+
13467+#include <uapi/asm/mman.h>
13468+
13469+#ifdef __KERNEL__
13470+#ifndef __ASSEMBLY__
13471+#ifdef CONFIG_X86_32
13472+#define arch_mmap_check i386_mmap_check
13473+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
13474+#endif
13475+#endif
13476+#endif
13477+
13478+#endif /* X86_MMAN_H */
13479diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
13480index 5f55e69..e20bfb1 100644
13481--- a/arch/x86/include/asm/mmu.h
13482+++ b/arch/x86/include/asm/mmu.h
13483@@ -9,7 +9,7 @@
13484 * we put the segment information here.
13485 */
13486 typedef struct {
13487- void *ldt;
13488+ struct desc_struct *ldt;
13489 int size;
13490
13491 #ifdef CONFIG_X86_64
13492@@ -18,7 +18,19 @@ typedef struct {
13493 #endif
13494
13495 struct mutex lock;
13496- void *vdso;
13497+ unsigned long vdso;
13498+
13499+#ifdef CONFIG_X86_32
13500+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13501+ unsigned long user_cs_base;
13502+ unsigned long user_cs_limit;
13503+
13504+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13505+ cpumask_t cpu_user_cs_mask;
13506+#endif
13507+
13508+#endif
13509+#endif
13510 } mm_context_t;
13511
13512 #ifdef CONFIG_SMP
13513diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
13514index cdbf367..adb37ac 100644
13515--- a/arch/x86/include/asm/mmu_context.h
13516+++ b/arch/x86/include/asm/mmu_context.h
13517@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
13518
13519 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
13520 {
13521+
13522+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13523+ unsigned int i;
13524+ pgd_t *pgd;
13525+
13526+ pax_open_kernel();
13527+ pgd = get_cpu_pgd(smp_processor_id());
13528+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
13529+ set_pgd_batched(pgd+i, native_make_pgd(0));
13530+ pax_close_kernel();
13531+#endif
13532+
13533 #ifdef CONFIG_SMP
13534 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
13535 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
13536@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13537 struct task_struct *tsk)
13538 {
13539 unsigned cpu = smp_processor_id();
13540+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13541+ int tlbstate = TLBSTATE_OK;
13542+#endif
13543
13544 if (likely(prev != next)) {
13545 #ifdef CONFIG_SMP
13546+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13547+ tlbstate = this_cpu_read(cpu_tlbstate.state);
13548+#endif
13549 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
13550 this_cpu_write(cpu_tlbstate.active_mm, next);
13551 #endif
13552 cpumask_set_cpu(cpu, mm_cpumask(next));
13553
13554 /* Re-load page tables */
13555+#ifdef CONFIG_PAX_PER_CPU_PGD
13556+ pax_open_kernel();
13557+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
13558+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
13559+ pax_close_kernel();
13560+ load_cr3(get_cpu_pgd(cpu));
13561+#else
13562 load_cr3(next->pgd);
13563+#endif
13564
13565 /* stop flush ipis for the previous mm */
13566 cpumask_clear_cpu(cpu, mm_cpumask(prev));
13567@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13568 */
13569 if (unlikely(prev->context.ldt != next->context.ldt))
13570 load_LDT_nolock(&next->context);
13571- }
13572+
13573+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13574+ if (!(__supported_pte_mask & _PAGE_NX)) {
13575+ smp_mb__before_clear_bit();
13576+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
13577+ smp_mb__after_clear_bit();
13578+ cpu_set(cpu, next->context.cpu_user_cs_mask);
13579+ }
13580+#endif
13581+
13582+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13583+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
13584+ prev->context.user_cs_limit != next->context.user_cs_limit))
13585+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13586 #ifdef CONFIG_SMP
13587+ else if (unlikely(tlbstate != TLBSTATE_OK))
13588+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13589+#endif
13590+#endif
13591+
13592+ }
13593 else {
13594+
13595+#ifdef CONFIG_PAX_PER_CPU_PGD
13596+ pax_open_kernel();
13597+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
13598+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
13599+ pax_close_kernel();
13600+ load_cr3(get_cpu_pgd(cpu));
13601+#endif
13602+
13603+#ifdef CONFIG_SMP
13604 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
13605 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
13606
13607@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13608 * tlb flush IPI delivery. We must reload CR3
13609 * to make sure to use no freed page tables.
13610 */
13611+
13612+#ifndef CONFIG_PAX_PER_CPU_PGD
13613 load_cr3(next->pgd);
13614+#endif
13615+
13616 load_LDT_nolock(&next->context);
13617+
13618+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
13619+ if (!(__supported_pte_mask & _PAGE_NX))
13620+ cpu_set(cpu, next->context.cpu_user_cs_mask);
13621+#endif
13622+
13623+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13624+#ifdef CONFIG_PAX_PAGEEXEC
13625+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
13626+#endif
13627+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13628+#endif
13629+
13630 }
13631+#endif
13632 }
13633-#endif
13634 }
13635
13636 #define activate_mm(prev, next) \
13637diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
13638index e3b7819..b257c64 100644
13639--- a/arch/x86/include/asm/module.h
13640+++ b/arch/x86/include/asm/module.h
13641@@ -5,6 +5,7 @@
13642
13643 #ifdef CONFIG_X86_64
13644 /* X86_64 does not define MODULE_PROC_FAMILY */
13645+#define MODULE_PROC_FAMILY ""
13646 #elif defined CONFIG_M486
13647 #define MODULE_PROC_FAMILY "486 "
13648 #elif defined CONFIG_M586
13649@@ -57,8 +58,20 @@
13650 #error unknown processor family
13651 #endif
13652
13653-#ifdef CONFIG_X86_32
13654-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
13655+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
13656+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
13657+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
13658+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
13659+#else
13660+#define MODULE_PAX_KERNEXEC ""
13661 #endif
13662
13663+#ifdef CONFIG_PAX_MEMORY_UDEREF
13664+#define MODULE_PAX_UDEREF "UDEREF "
13665+#else
13666+#define MODULE_PAX_UDEREF ""
13667+#endif
13668+
13669+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
13670+
13671 #endif /* _ASM_X86_MODULE_H */
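The MODULE_ARCH_VERMAGIC change above relies on compile-time concatenation of adjacent string literals: the PaX method names become part of a module's vermagic, so a module built without the matching KERNEXEC/UDEREF options is rejected at load time. A small standalone demonstration, with the three macro values mimicking one possible configuration (x86-64 with KERNEXEC_BTS and UDEREF) rather than any particular build:

/*
 * How MODULE_ARCH_VERMAGIC is assembled: adjacent string literals
 * concatenate at compile time.  The values chosen below are
 * illustrative.
 */
#include <stdio.h>

#define MODULE_PROC_FAMILY	""		/* the X86_64 case */
#define MODULE_PAX_KERNEXEC	"KERNEXEC_BTS "
#define MODULE_PAX_UDEREF	"UDEREF "

#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF

int main(void)
{
	/* a module built with different options carries a different
	 * string, so the vermagic comparison fails at load time */
	printf("vermagic suffix: \"%s\"\n", MODULE_ARCH_VERMAGIC);
	return 0;
}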
13672diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
13673index c0fa356..07a498a 100644
13674--- a/arch/x86/include/asm/nmi.h
13675+++ b/arch/x86/include/asm/nmi.h
13676@@ -42,11 +42,11 @@ struct nmiaction {
13677 nmi_handler_t handler;
13678 unsigned long flags;
13679 const char *name;
13680-};
13681+} __do_const;
13682
13683 #define register_nmi_handler(t, fn, fg, n, init...) \
13684 ({ \
13685- static struct nmiaction init fn##_na = { \
13686+ static const struct nmiaction init fn##_na = { \
13687 .handler = (fn), \
13688 .name = (n), \
13689 .flags = (fg), \
13690@@ -54,7 +54,7 @@ struct nmiaction {
13691 __register_nmi_handler((t), &fn##_na); \
13692 })
13693
13694-int __register_nmi_handler(unsigned int, struct nmiaction *);
13695+int __register_nmi_handler(unsigned int, const struct nmiaction *);
13696
13697 void unregister_nmi_handler(unsigned int, const char *);
13698
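Constifying struct nmiaction with __do_const means registered handler descriptors can live in read-only memory, so a kernel write primitive cannot redirect an NMI handler. The idiom in plain C, reusing names from the patched header but with a simplified handler signature (a sketch of the concept, not the kernel's dispatch path):

/*
 * A const descriptor of function pointers is placed in .rodata by the
 * compiler/linker; calls through it still work normally.
 */
#include <stdio.h>

typedef int (*nmi_handler_t)(unsigned int type, void *regs);

struct nmiaction {
	nmi_handler_t	handler;
	unsigned long	flags;
	const char	*name;
};

static int demo_handler(unsigned int type, void *regs)
{
	(void)regs;
	printf("handling NMI type %u\n", type);
	return 1;	/* NMI_HANDLED-style return, illustrative */
}

static const struct nmiaction demo_na = {
	.handler	= demo_handler,
	.name		= "demo",
	.flags		= 0,
};

int main(void)
{
	return !demo_na.handler(0, NULL);
}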
13699diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
13700index 320f7bb..e89f8f8 100644
13701--- a/arch/x86/include/asm/page_64_types.h
13702+++ b/arch/x86/include/asm/page_64_types.h
13703@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
13704
13705 /* duplicated to the one in bootmem.h */
13706 extern unsigned long max_pfn;
13707-extern unsigned long phys_base;
13708+extern const unsigned long phys_base;
13709
13710 extern unsigned long __phys_addr(unsigned long);
13711 #define __phys_reloc_hide(x) (x)
13712diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
13713index 5edd174..9cf5821 100644
13714--- a/arch/x86/include/asm/paravirt.h
13715+++ b/arch/x86/include/asm/paravirt.h
13716@@ -630,6 +630,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
13717 val);
13718 }
13719
13720+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
13721+{
13722+ pgdval_t val = native_pgd_val(pgd);
13723+
13724+ if (sizeof(pgdval_t) > sizeof(long))
13725+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
13726+ val, (u64)val >> 32);
13727+ else
13728+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
13729+ val);
13730+}
13731+
13732 static inline void pgd_clear(pgd_t *pgdp)
13733 {
13734 set_pgd(pgdp, __pgd(0));
13735@@ -711,6 +723,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
13736 pv_mmu_ops.set_fixmap(idx, phys, flags);
13737 }
13738
13739+#ifdef CONFIG_PAX_KERNEXEC
13740+static inline unsigned long pax_open_kernel(void)
13741+{
13742+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
13743+}
13744+
13745+static inline unsigned long pax_close_kernel(void)
13746+{
13747+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
13748+}
13749+#else
13750+static inline unsigned long pax_open_kernel(void) { return 0; }
13751+static inline unsigned long pax_close_kernel(void) { return 0; }
13752+#endif
13753+
13754 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
13755
13756 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
13757@@ -927,7 +954,7 @@ extern void default_banner(void);
13758
13759 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
13760 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
13761-#define PARA_INDIRECT(addr) *%cs:addr
13762+#define PARA_INDIRECT(addr) *%ss:addr
13763 #endif
13764
13765 #define INTERRUPT_RETURN \
13766@@ -1002,6 +1029,21 @@ extern void default_banner(void);
13767 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
13768 CLBR_NONE, \
13769 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
13770+
13771+#define GET_CR0_INTO_RDI \
13772+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
13773+ mov %rax,%rdi
13774+
13775+#define SET_RDI_INTO_CR0 \
13776+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13777+
13778+#define GET_CR3_INTO_RDI \
13779+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
13780+ mov %rax,%rdi
13781+
13782+#define SET_RDI_INTO_CR3 \
13783+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
13784+
13785 #endif /* CONFIG_X86_32 */
13786
13787 #endif /* __ASSEMBLY__ */
13788diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
13789index 142236e..5446ffbc 100644
13790--- a/arch/x86/include/asm/paravirt_types.h
13791+++ b/arch/x86/include/asm/paravirt_types.h
13792@@ -84,7 +84,7 @@ struct pv_init_ops {
13793 */
13794 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
13795 unsigned long addr, unsigned len);
13796-};
13797+} __no_const;
13798
13799
13800 struct pv_lazy_ops {
13801@@ -97,7 +97,7 @@ struct pv_time_ops {
13802 unsigned long long (*sched_clock)(void);
13803 unsigned long long (*steal_clock)(int cpu);
13804 unsigned long (*get_tsc_khz)(void);
13805-};
13806+} __no_const;
13807
13808 struct pv_cpu_ops {
13809 /* hooks for various privileged instructions */
13810@@ -191,7 +191,7 @@ struct pv_cpu_ops {
13811
13812 void (*start_context_switch)(struct task_struct *prev);
13813 void (*end_context_switch)(struct task_struct *next);
13814-};
13815+} __no_const;
13816
13817 struct pv_irq_ops {
13818 /*
13819@@ -222,7 +222,7 @@ struct pv_apic_ops {
13820 unsigned long start_eip,
13821 unsigned long start_esp);
13822 #endif
13823-};
13824+} __no_const;
13825
13826 struct pv_mmu_ops {
13827 unsigned long (*read_cr2)(void);
13828@@ -312,6 +312,7 @@ struct pv_mmu_ops {
13829 struct paravirt_callee_save make_pud;
13830
13831 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
13832+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
13833 #endif /* PAGETABLE_LEVELS == 4 */
13834 #endif /* PAGETABLE_LEVELS >= 3 */
13835
13836@@ -323,6 +324,12 @@ struct pv_mmu_ops {
13837 an mfn. We can tell which is which from the index. */
13838 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
13839 phys_addr_t phys, pgprot_t flags);
13840+
13841+#ifdef CONFIG_PAX_KERNEXEC
13842+ unsigned long (*pax_open_kernel)(void);
13843+ unsigned long (*pax_close_kernel)(void);
13844+#endif
13845+
13846 };
13847
13848 struct arch_spinlock;
13849@@ -333,7 +340,7 @@ struct pv_lock_ops {
13850 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
13851 int (*spin_trylock)(struct arch_spinlock *lock);
13852 void (*spin_unlock)(struct arch_spinlock *lock);
13853-};
13854+} __no_const;
13855
13856 /* This contains all the paravirt structures: we get a convenient
13857 * number for each function using the offset which we use to indicate
13858diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
13859index b4389a4..7024269 100644
13860--- a/arch/x86/include/asm/pgalloc.h
13861+++ b/arch/x86/include/asm/pgalloc.h
13862@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
13863 pmd_t *pmd, pte_t *pte)
13864 {
13865 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
13866+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
13867+}
13868+
13869+static inline void pmd_populate_user(struct mm_struct *mm,
13870+ pmd_t *pmd, pte_t *pte)
13871+{
13872+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
13873 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
13874 }
13875
13876@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
13877
13878 #ifdef CONFIG_X86_PAE
13879 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
13880+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
13881+{
13882+ pud_populate(mm, pudp, pmd);
13883+}
13884 #else /* !CONFIG_X86_PAE */
13885 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
13886 {
13887 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
13888 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
13889 }
13890+
13891+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
13892+{
13893+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
13894+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
13895+}
13896 #endif /* CONFIG_X86_PAE */
13897
13898 #if PAGETABLE_LEVELS > 3
13899@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
13900 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
13901 }
13902
13903+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
13904+{
13905+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
13906+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
13907+}
13908+
13909 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
13910 {
13911 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
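The new *_populate_kernel/*_populate_user pairs above differ only in whether _PAGE_USER ends up in the table entry: _KERNPG_TABLE is _PAGE_TABLE minus the user bit. The arithmetic, using the standard x86 PTE bit positions (the same values explain the 0x067 -> 0x063 change to PDE_IDENT_ATTR further down in pgtable_types.h):

/*
 * _PAGE_TABLE vs _KERNPG_TABLE is exactly one bit: _PAGE_USER.
 * Plain arithmetic, not kernel code.
 */
#include <stdio.h>

#define _PAGE_PRESENT	(1UL << 0)
#define _PAGE_RW	(1UL << 1)
#define _PAGE_USER	(1UL << 2)
#define _PAGE_ACCESSED	(1UL << 5)
#define _PAGE_DIRTY	(1UL << 6)

#define _KERNPG_TABLE	(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_TABLE	(_KERNPG_TABLE | _PAGE_USER)

int main(void)
{
	printf("_KERNPG_TABLE = %#lx\n", _KERNPG_TABLE);	/* 0x63 */
	printf("_PAGE_TABLE   = %#lx\n", _PAGE_TABLE);		/* 0x67 */
	return 0;
}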
13912diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
13913index f2b489c..4f7e2e5 100644
13914--- a/arch/x86/include/asm/pgtable-2level.h
13915+++ b/arch/x86/include/asm/pgtable-2level.h
13916@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
13917
13918 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
13919 {
13920+ pax_open_kernel();
13921 *pmdp = pmd;
13922+ pax_close_kernel();
13923 }
13924
13925 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
13926diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
13927index 4cc9f2b..5fd9226 100644
13928--- a/arch/x86/include/asm/pgtable-3level.h
13929+++ b/arch/x86/include/asm/pgtable-3level.h
13930@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
13931
13932 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
13933 {
13934+ pax_open_kernel();
13935 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
13936+ pax_close_kernel();
13937 }
13938
13939 static inline void native_set_pud(pud_t *pudp, pud_t pud)
13940 {
13941+ pax_open_kernel();
13942 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
13943+ pax_close_kernel();
13944 }
13945
13946 /*
13947diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
13948index 1c1a955..50f828c 100644
13949--- a/arch/x86/include/asm/pgtable.h
13950+++ b/arch/x86/include/asm/pgtable.h
13951@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
13952
13953 #ifndef __PAGETABLE_PUD_FOLDED
13954 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
13955+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
13956 #define pgd_clear(pgd) native_pgd_clear(pgd)
13957 #endif
13958
13959@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
13960
13961 #define arch_end_context_switch(prev) do {} while(0)
13962
13963+#define pax_open_kernel() native_pax_open_kernel()
13964+#define pax_close_kernel() native_pax_close_kernel()
13965 #endif /* CONFIG_PARAVIRT */
13966
13967+#define __HAVE_ARCH_PAX_OPEN_KERNEL
13968+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
13969+
13970+#ifdef CONFIG_PAX_KERNEXEC
13971+static inline unsigned long native_pax_open_kernel(void)
13972+{
13973+ unsigned long cr0;
13974+
13975+ preempt_disable();
13976+ barrier();
13977+ cr0 = read_cr0() ^ X86_CR0_WP;
13978+ BUG_ON(cr0 & X86_CR0_WP);
13979+ write_cr0(cr0);
13980+ return cr0 ^ X86_CR0_WP;
13981+}
13982+
13983+static inline unsigned long native_pax_close_kernel(void)
13984+{
13985+ unsigned long cr0;
13986+
13987+ cr0 = read_cr0() ^ X86_CR0_WP;
13988+ BUG_ON(!(cr0 & X86_CR0_WP));
13989+ write_cr0(cr0);
13990+ barrier();
13991+ preempt_enable_no_resched();
13992+ return cr0 ^ X86_CR0_WP;
13993+}
13994+#else
13995+static inline unsigned long native_pax_open_kernel(void) { return 0; }
13996+static inline unsigned long native_pax_close_kernel(void) { return 0; }
13997+#endif
13998+
13999 /*
14000 * The following only work if pte_present() is true.
14001 * Undefined behaviour if not..
14002 */
14003+static inline int pte_user(pte_t pte)
14004+{
14005+ return pte_val(pte) & _PAGE_USER;
14006+}
14007+
14008 static inline int pte_dirty(pte_t pte)
14009 {
14010 return pte_flags(pte) & _PAGE_DIRTY;
14011@@ -200,9 +240,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
14012 return pte_clear_flags(pte, _PAGE_RW);
14013 }
14014
14015+static inline pte_t pte_mkread(pte_t pte)
14016+{
14017+ return __pte(pte_val(pte) | _PAGE_USER);
14018+}
14019+
14020 static inline pte_t pte_mkexec(pte_t pte)
14021 {
14022- return pte_clear_flags(pte, _PAGE_NX);
14023+#ifdef CONFIG_X86_PAE
14024+ if (__supported_pte_mask & _PAGE_NX)
14025+ return pte_clear_flags(pte, _PAGE_NX);
14026+ else
14027+#endif
14028+ return pte_set_flags(pte, _PAGE_USER);
14029+}
14030+
14031+static inline pte_t pte_exprotect(pte_t pte)
14032+{
14033+#ifdef CONFIG_X86_PAE
14034+ if (__supported_pte_mask & _PAGE_NX)
14035+ return pte_set_flags(pte, _PAGE_NX);
14036+ else
14037+#endif
14038+ return pte_clear_flags(pte, _PAGE_USER);
14039 }
14040
14041 static inline pte_t pte_mkdirty(pte_t pte)
14042@@ -394,6 +454,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
14043 #endif
14044
14045 #ifndef __ASSEMBLY__
14046+
14047+#ifdef CONFIG_PAX_PER_CPU_PGD
14048+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
14049+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
14050+{
14051+ return cpu_pgd[cpu];
14052+}
14053+#endif
14054+
14055 #include <linux/mm_types.h>
14056
14057 static inline int pte_none(pte_t pte)
14058@@ -583,7 +652,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
14059
14060 static inline int pgd_bad(pgd_t pgd)
14061 {
14062- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
14063+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
14064 }
14065
14066 static inline int pgd_none(pgd_t pgd)
14067@@ -606,7 +675,12 @@ static inline int pgd_none(pgd_t pgd)
14068 * pgd_offset() returns a (pgd_t *)
14069 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
14070 */
14071-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
14072+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
14073+
14074+#ifdef CONFIG_PAX_PER_CPU_PGD
14075+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
14076+#endif
14077+
14078 /*
14079 * a shortcut which implies the use of the kernel's pgd, instead
14080 * of a process's
14081@@ -617,6 +691,20 @@ static inline int pgd_none(pgd_t pgd)
14082 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
14083 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
14084
14085+#ifdef CONFIG_X86_32
14086+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
14087+#else
14088+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
14089+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
14090+
14091+#ifdef CONFIG_PAX_MEMORY_UDEREF
14092+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
14093+#else
14094+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
14095+#endif
14096+
14097+#endif
14098+
14099 #ifndef __ASSEMBLY__
14100
14101 extern int direct_gbpages;
14102@@ -781,11 +869,23 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
14103 * dst and src can be on the same page, but the range must not overlap,
14104 * and must not cross a page boundary.
14105 */
14106-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
14107+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
14108 {
14109- memcpy(dst, src, count * sizeof(pgd_t));
14110+ pax_open_kernel();
14111+ while (count--)
14112+ *dst++ = *src++;
14113+ pax_close_kernel();
14114 }
14115
14116+#ifdef CONFIG_PAX_PER_CPU_PGD
14117+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
14118+#endif
14119+
14120+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14121+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
14122+#else
14123+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
14124+#endif
14125
14126 #include <asm-generic/pgtable.h>
14127 #endif /* __ASSEMBLY__ */
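native_pax_open_kernel() and native_pax_close_kernel() above bracket page-table writes by XOR-toggling CR0.WP under disabled preemption, with BUG_ON catching unbalanced pairs. CR0 access is privileged, so the simulation below substitutes a fake register for read_cr0()/write_cr0() and assert() for BUG_ON; only the toggle-and-check logic is carried over.

/*
 * Simulation of the open/close pairing.  preempt_disable()/enable and
 * barrier() are elided; 0x80050033 is a typical CR0 value with WP set.
 */
#include <assert.h>
#include <stdio.h>

#define X86_CR0_WP (1UL << 16)

static unsigned long fake_cr0 = 0x80050033UL;

static unsigned long pax_open_kernel(void)
{
	unsigned long cr0 = fake_cr0 ^ X86_CR0_WP;	/* clear WP */

	assert(!(cr0 & X86_CR0_WP));	/* BUG_ON(cr0 & X86_CR0_WP) */
	fake_cr0 = cr0;			/* write_cr0(cr0) */
	return cr0 ^ X86_CR0_WP;
}

static unsigned long pax_close_kernel(void)
{
	unsigned long cr0 = fake_cr0 ^ X86_CR0_WP;	/* set WP */

	assert(cr0 & X86_CR0_WP);	/* BUG_ON(!(cr0 & X86_CR0_WP)) */
	fake_cr0 = cr0;
	return cr0 ^ X86_CR0_WP;
}

int main(void)
{
	pax_open_kernel();	/* read-only kernel pages now writable */
	/* ... a page-table update would go here ... */
	pax_close_kernel();	/* write protection restored */
	printf("WP restored: %s\n", (fake_cr0 & X86_CR0_WP) ? "yes" : "no");
	return 0;
}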
14128diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
14129index 8faa215..a8a17ea 100644
14130--- a/arch/x86/include/asm/pgtable_32.h
14131+++ b/arch/x86/include/asm/pgtable_32.h
14132@@ -25,9 +25,6 @@
14133 struct mm_struct;
14134 struct vm_area_struct;
14135
14136-extern pgd_t swapper_pg_dir[1024];
14137-extern pgd_t initial_page_table[1024];
14138-
14139 static inline void pgtable_cache_init(void) { }
14140 static inline void check_pgt_cache(void) { }
14141 void paging_init(void);
14142@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14143 # include <asm/pgtable-2level.h>
14144 #endif
14145
14146+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
14147+extern pgd_t initial_page_table[PTRS_PER_PGD];
14148+#ifdef CONFIG_X86_PAE
14149+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
14150+#endif
14151+
14152 #if defined(CONFIG_HIGHPTE)
14153 #define pte_offset_map(dir, address) \
14154 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
14155@@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14156 /* Clear a kernel PTE and flush it from the TLB */
14157 #define kpte_clear_flush(ptep, vaddr) \
14158 do { \
14159+ pax_open_kernel(); \
14160 pte_clear(&init_mm, (vaddr), (ptep)); \
14161+ pax_close_kernel(); \
14162 __flush_tlb_one((vaddr)); \
14163 } while (0)
14164
14165@@ -75,6 +80,9 @@ do { \
14166
14167 #endif /* !__ASSEMBLY__ */
14168
14169+#define HAVE_ARCH_UNMAPPED_AREA
14170+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
14171+
14172 /*
14173 * kern_addr_valid() is (1) for FLATMEM and (0) for
14174 * SPARSEMEM and DISCONTIGMEM
14175diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
14176index ed5903b..c7fe163 100644
14177--- a/arch/x86/include/asm/pgtable_32_types.h
14178+++ b/arch/x86/include/asm/pgtable_32_types.h
14179@@ -8,7 +8,7 @@
14180 */
14181 #ifdef CONFIG_X86_PAE
14182 # include <asm/pgtable-3level_types.h>
14183-# define PMD_SIZE (1UL << PMD_SHIFT)
14184+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
14185 # define PMD_MASK (~(PMD_SIZE - 1))
14186 #else
14187 # include <asm/pgtable-2level_types.h>
14188@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
14189 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
14190 #endif
14191
14192+#ifdef CONFIG_PAX_KERNEXEC
14193+#ifndef __ASSEMBLY__
14194+extern unsigned char MODULES_EXEC_VADDR[];
14195+extern unsigned char MODULES_EXEC_END[];
14196+#endif
14197+#include <asm/boot.h>
14198+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
14199+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
14200+#else
14201+#define ktla_ktva(addr) (addr)
14202+#define ktva_ktla(addr) (addr)
14203+#endif
14204+
14205 #define MODULES_VADDR VMALLOC_START
14206 #define MODULES_END VMALLOC_END
14207 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
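Under KERNEXEC on i386, ktla_ktva()/ktva_ktla() above translate between the kernel text's linear address and its executable virtual alias by a constant offset of LOAD_PHYSICAL_ADDR + PAGE_OFFSET. Worked arithmetic with the common defaults (LOAD_PHYSICAL_ADDR 0x1000000, PAGE_OFFSET 0xC0000000 -- both Kconfig-dependent, so treat them as assumptions):

/*
 * What ktla_ktva()/ktva_ktla() compute, with assumed i386 defaults.
 */
#include <stdio.h>

#define LOAD_PHYSICAL_ADDR	0x1000000UL
#define PAGE_OFFSET		0xC0000000UL

#define ktla_ktva(addr)	((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr)	((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
	unsigned long ktla = 0x00100000UL;	/* hypothetical text address */
	unsigned long ktva = ktla_ktva(ktla);

	/* round trip: 0x100000 -> 0xc1100000 -> 0x100000 */
	printf("ktla %#lx <-> ktva %#lx\n", ktva_ktla(ktva), ktva);
	return 0;
}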
14208diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
14209index 47356f9..deb94a2 100644
14210--- a/arch/x86/include/asm/pgtable_64.h
14211+++ b/arch/x86/include/asm/pgtable_64.h
14212@@ -16,10 +16,14 @@
14213
14214 extern pud_t level3_kernel_pgt[512];
14215 extern pud_t level3_ident_pgt[512];
14216+extern pud_t level3_vmalloc_start_pgt[512];
14217+extern pud_t level3_vmalloc_end_pgt[512];
14218+extern pud_t level3_vmemmap_pgt[512];
14219+extern pud_t level2_vmemmap_pgt[512];
14220 extern pmd_t level2_kernel_pgt[512];
14221 extern pmd_t level2_fixmap_pgt[512];
14222-extern pmd_t level2_ident_pgt[512];
14223-extern pgd_t init_level4_pgt[];
14224+extern pmd_t level2_ident_pgt[512*2];
14225+extern pgd_t init_level4_pgt[512];
14226
14227 #define swapper_pg_dir init_level4_pgt
14228
14229@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14230
14231 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14232 {
14233+ pax_open_kernel();
14234 *pmdp = pmd;
14235+ pax_close_kernel();
14236 }
14237
14238 static inline void native_pmd_clear(pmd_t *pmd)
14239@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
14240
14241 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14242 {
14243+ pax_open_kernel();
14244 *pudp = pud;
14245+ pax_close_kernel();
14246 }
14247
14248 static inline void native_pud_clear(pud_t *pud)
14249@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
14250
14251 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
14252 {
14253+ pax_open_kernel();
14254+ *pgdp = pgd;
14255+ pax_close_kernel();
14256+}
14257+
14258+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14259+{
14260 *pgdp = pgd;
14261 }
14262
14263diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
14264index 766ea16..5b96cb3 100644
14265--- a/arch/x86/include/asm/pgtable_64_types.h
14266+++ b/arch/x86/include/asm/pgtable_64_types.h
14267@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
14268 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
14269 #define MODULES_END _AC(0xffffffffff000000, UL)
14270 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
14271+#define MODULES_EXEC_VADDR MODULES_VADDR
14272+#define MODULES_EXEC_END MODULES_END
14273+
14274+#define ktla_ktva(addr) (addr)
14275+#define ktva_ktla(addr) (addr)
14276
14277 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
14278diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
14279index 3c32db8..1ddccf5 100644
14280--- a/arch/x86/include/asm/pgtable_types.h
14281+++ b/arch/x86/include/asm/pgtable_types.h
14282@@ -16,13 +16,12 @@
14283 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
14284 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
14285 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
14286-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
14287+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
14288 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
14289 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
14290 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
14291-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
14292-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
14293-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
14294+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
14295+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
14296 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
14297
14298 /* If _PAGE_BIT_PRESENT is clear, we use these: */
14299@@ -40,7 +39,6 @@
14300 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
14301 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
14302 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
14303-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
14304 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
14305 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
14306 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
14307@@ -57,8 +55,10 @@
14308
14309 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
14310 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
14311-#else
14312+#elif defined(CONFIG_KMEMCHECK)
14313 #define _PAGE_NX (_AT(pteval_t, 0))
14314+#else
14315+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
14316 #endif
14317
14318 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
14319@@ -116,6 +116,9 @@
14320 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
14321 _PAGE_ACCESSED)
14322
14323+#define PAGE_READONLY_NOEXEC PAGE_READONLY
14324+#define PAGE_SHARED_NOEXEC PAGE_SHARED
14325+
14326 #define __PAGE_KERNEL_EXEC \
14327 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
14328 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
14329@@ -126,7 +129,7 @@
14330 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
14331 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
14332 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
14333-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
14334+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
14335 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
14336 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
14337 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
14338@@ -188,8 +191,8 @@
14339 * bits are combined, this will alow user to access the high address mapped
14340 * VDSO in the presence of CONFIG_COMPAT_VDSO
14341 */
14342-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
14343-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
14344+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
14345+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
14346 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
14347 #endif
14348
14349@@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
14350 {
14351 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
14352 }
14353+#endif
14354
14355+#if PAGETABLE_LEVELS == 3
14356+#include <asm-generic/pgtable-nopud.h>
14357+#endif
14358+
14359+#if PAGETABLE_LEVELS == 2
14360+#include <asm-generic/pgtable-nopmd.h>
14361+#endif
14362+
14363+#ifndef __ASSEMBLY__
14364 #if PAGETABLE_LEVELS > 3
14365 typedef struct { pudval_t pud; } pud_t;
14366
14367@@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
14368 return pud.pud;
14369 }
14370 #else
14371-#include <asm-generic/pgtable-nopud.h>
14372-
14373 static inline pudval_t native_pud_val(pud_t pud)
14374 {
14375 return native_pgd_val(pud.pgd);
14376@@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
14377 return pmd.pmd;
14378 }
14379 #else
14380-#include <asm-generic/pgtable-nopmd.h>
14381-
14382 static inline pmdval_t native_pmd_val(pmd_t pmd)
14383 {
14384 return native_pgd_val(pmd.pud.pgd);
14385@@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
14386
14387 extern pteval_t __supported_pte_mask;
14388 extern void set_nx(void);
14389-extern int nx_enabled;
14390
14391 #define pgprot_writecombine pgprot_writecombine
14392 extern pgprot_t pgprot_writecombine(pgprot_t prot);
14393diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
14394index 888184b..a07ac89 100644
14395--- a/arch/x86/include/asm/processor.h
14396+++ b/arch/x86/include/asm/processor.h
14397@@ -287,7 +287,7 @@ struct tss_struct {
14398
14399 } ____cacheline_aligned;
14400
14401-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
14402+extern struct tss_struct init_tss[NR_CPUS];
14403
14404 /*
14405 * Save the original ist values for checking stack pointers during debugging
14406@@ -827,11 +827,18 @@ static inline void spin_lock_prefetch(const void *x)
14407 */
14408 #define TASK_SIZE PAGE_OFFSET
14409 #define TASK_SIZE_MAX TASK_SIZE
14410+
14411+#ifdef CONFIG_PAX_SEGMEXEC
14412+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
14413+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
14414+#else
14415 #define STACK_TOP TASK_SIZE
14416-#define STACK_TOP_MAX STACK_TOP
14417+#endif
14418+
14419+#define STACK_TOP_MAX TASK_SIZE
14420
14421 #define INIT_THREAD { \
14422- .sp0 = sizeof(init_stack) + (long)&init_stack, \
14423+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
14424 .vm86_info = NULL, \
14425 .sysenter_cs = __KERNEL_CS, \
14426 .io_bitmap_ptr = NULL, \
14427@@ -845,7 +852,7 @@ static inline void spin_lock_prefetch(const void *x)
14428 */
14429 #define INIT_TSS { \
14430 .x86_tss = { \
14431- .sp0 = sizeof(init_stack) + (long)&init_stack, \
14432+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
14433 .ss0 = __KERNEL_DS, \
14434 .ss1 = __KERNEL_CS, \
14435 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
14436@@ -856,11 +863,7 @@ static inline void spin_lock_prefetch(const void *x)
14437 extern unsigned long thread_saved_pc(struct task_struct *tsk);
14438
14439 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
14440-#define KSTK_TOP(info) \
14441-({ \
14442- unsigned long *__ptr = (unsigned long *)(info); \
14443- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
14444-})
14445+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
14446
14447 /*
14448 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
14449@@ -875,7 +878,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14450 #define task_pt_regs(task) \
14451 ({ \
14452 struct pt_regs *__regs__; \
14453- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
14454+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
14455 __regs__ - 1; \
14456 })
14457
14458@@ -885,13 +888,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14459 /*
14460 * User space process size. 47bits minus one guard page.
14461 */
14462-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
14463+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
14464
14465 /* This decides where the kernel will search for a free chunk of vm
14466 * space during mmap's.
14467 */
14468 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
14469- 0xc0000000 : 0xFFFFe000)
14470+ 0xc0000000 : 0xFFFFf000)
14471
14472 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
14473 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
14474@@ -902,11 +905,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14475 #define STACK_TOP_MAX TASK_SIZE_MAX
14476
14477 #define INIT_THREAD { \
14478- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
14479+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
14480 }
14481
14482 #define INIT_TSS { \
14483- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
14484+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
14485 }
14486
14487 /*
14488@@ -934,6 +937,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
14489 */
14490 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
14491
14492+#ifdef CONFIG_PAX_SEGMEXEC
14493+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
14494+#endif
14495+
14496 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
14497
14498 /* Get/set a process' ability to use the timestamp counter instruction */
14499@@ -994,12 +1001,12 @@ extern bool cpu_has_amd_erratum(const int *);
14500 #define cpu_has_amd_erratum(x) (false)
14501 #endif /* CONFIG_CPU_SUP_AMD */
14502
14503-extern unsigned long arch_align_stack(unsigned long sp);
14504+#define arch_align_stack(x) ((x) & ~0xfUL)
14505 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
14506
14507 void default_idle(void);
14508 bool set_pm_idle_to_default(void);
14509
14510-void stop_this_cpu(void *dummy);
14511+void stop_this_cpu(void *dummy) __noreturn;
14512
14513 #endif /* _ASM_X86_PROCESSOR_H */
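Replacing arch_align_stack() with the mask above drops the old function's per-exec jitter in favour of plain 16-byte downward alignment (PaX applies its own stack randomization elsewhere). The mask in isolation:

/*
 * arch_align_stack() as defined by the patch: align down to 16 bytes.
 */
#include <stdio.h>

#define arch_align_stack(x) ((x) & ~0xfUL)

int main(void)
{
	unsigned long sp = 0xbfffe7c9UL;	/* arbitrary example */

	printf("%#lx -> %#lx\n", sp, arch_align_stack(sp));	/* ...e7c0 */
	return 0;
}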
14514diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
14515index 942a086..6c26446 100644
14516--- a/arch/x86/include/asm/ptrace.h
14517+++ b/arch/x86/include/asm/ptrace.h
14518@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
14519 }
14520
14521 /*
14522- * user_mode_vm(regs) determines whether a register set came from user mode.
14523+ * user_mode(regs) determines whether a register set came from user mode.
14524 * This is true if V8086 mode was enabled OR if the register set was from
14525 * protected mode with RPL-3 CS value. This tricky test checks that with
14526 * one comparison. Many places in the kernel can bypass this full check
14527- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
14528+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
14529+ * be used.
14530 */
14531-static inline int user_mode(struct pt_regs *regs)
14532+static inline int user_mode_novm(struct pt_regs *regs)
14533 {
14534 #ifdef CONFIG_X86_32
14535 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
14536 #else
14537- return !!(regs->cs & 3);
14538+ return !!(regs->cs & SEGMENT_RPL_MASK);
14539 #endif
14540 }
14541
14542-static inline int user_mode_vm(struct pt_regs *regs)
14543+static inline int user_mode(struct pt_regs *regs)
14544 {
14545 #ifdef CONFIG_X86_32
14546 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
14547 USER_RPL;
14548 #else
14549- return user_mode(regs);
14550+ return user_mode_novm(regs);
14551 #endif
14552 }
14553
14554@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
14555 #ifdef CONFIG_X86_64
14556 static inline bool user_64bit_mode(struct pt_regs *regs)
14557 {
14558+ unsigned long cs = regs->cs & 0xffff;
14559 #ifndef CONFIG_PARAVIRT
14560 /*
14561 * On non-paravirt systems, this is the only long mode CPL 3
14562 * selector. We do not allow long mode selectors in the LDT.
14563 */
14564- return regs->cs == __USER_CS;
14565+ return cs == __USER_CS;
14566 #else
14567 /* Headers are too twisted for this to go in paravirt.h. */
14568- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
14569+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
14570 #endif
14571 }
14572
14573@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
14574 * Traps from the kernel do not save sp and ss.
14575 * Use the helper function to retrieve sp.
14576 */
14577- if (offset == offsetof(struct pt_regs, sp) &&
14578- regs->cs == __KERNEL_CS)
14579- return kernel_stack_pointer(regs);
14580+ if (offset == offsetof(struct pt_regs, sp)) {
14581+ unsigned long cs = regs->cs & 0xffff;
14582+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
14583+ return kernel_stack_pointer(regs);
14584+ }
14585 #endif
14586 return *(unsigned long *)((unsigned long)regs + offset);
14587 }
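The user_mode()/user_mode_novm() swap above keys the test off the selector's RPL field and, in user_64bit_mode() and regs_get_register(), masks regs->cs to 16 bits before comparing. A segment selector packs index (bits 15..3), table indicator (bit 2) and RPL (bits 1..0); decoding one makes the checks concrete (the selector values below follow the standard i386 GDT layout):

/*
 * Selector decoding behind the user_mode tests.  __USER_CS on i386 is
 * GDT entry 14 with RPL 3, i.e. 14*8+3 = 0x73.
 */
#include <stdio.h>

#define SEGMENT_RPL_MASK	0x3
#define USER_RPL		0x3

static void decode(unsigned short sel)
{
	printf("selector %#x: index=%u ti=%u rpl=%u -> user=%d\n",
	       sel, sel >> 3, (sel >> 2) & 1, sel & SEGMENT_RPL_MASK,
	       (sel & SEGMENT_RPL_MASK) == USER_RPL);
}

int main(void)
{
	decode(14 * 8 + 3);	/* __USER_CS: 0x73, user mode */
	decode(12 * 8);		/* __KERNEL_CS: 0x60, RPL 0 */
	return 0;
}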
14588diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
14589index fe1ec5b..dc5c3fe 100644
14590--- a/arch/x86/include/asm/realmode.h
14591+++ b/arch/x86/include/asm/realmode.h
14592@@ -22,16 +22,14 @@ struct real_mode_header {
14593 #endif
14594 /* APM/BIOS reboot */
14595 u32 machine_real_restart_asm;
14596-#ifdef CONFIG_X86_64
14597 u32 machine_real_restart_seg;
14598-#endif
14599 };
14600
14601 /* This must match data at trampoline_32/64.S */
14602 struct trampoline_header {
14603 #ifdef CONFIG_X86_32
14604 u32 start;
14605- u16 gdt_pad;
14606+ u16 boot_cs;
14607 u16 gdt_limit;
14608 u32 gdt_base;
14609 #else
14610diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
14611index a82c4f1..ac45053 100644
14612--- a/arch/x86/include/asm/reboot.h
14613+++ b/arch/x86/include/asm/reboot.h
14614@@ -6,13 +6,13 @@
14615 struct pt_regs;
14616
14617 struct machine_ops {
14618- void (*restart)(char *cmd);
14619- void (*halt)(void);
14620- void (*power_off)(void);
14621+ void (* __noreturn restart)(char *cmd);
14622+ void (* __noreturn halt)(void);
14623+ void (* __noreturn power_off)(void);
14624 void (*shutdown)(void);
14625 void (*crash_shutdown)(struct pt_regs *);
14626- void (*emergency_restart)(void);
14627-};
14628+ void (* __noreturn emergency_restart)(void);
14629+} __no_const;
14630
14631 extern struct machine_ops machine_ops;
14632
14633diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
14634index 2dbe4a7..ce1db00 100644
14635--- a/arch/x86/include/asm/rwsem.h
14636+++ b/arch/x86/include/asm/rwsem.h
14637@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
14638 {
14639 asm volatile("# beginning down_read\n\t"
14640 LOCK_PREFIX _ASM_INC "(%1)\n\t"
14641+
14642+#ifdef CONFIG_PAX_REFCOUNT
14643+ "jno 0f\n"
14644+ LOCK_PREFIX _ASM_DEC "(%1)\n"
14645+ "int $4\n0:\n"
14646+ _ASM_EXTABLE(0b, 0b)
14647+#endif
14648+
14649 /* adds 0x00000001 */
14650 " jns 1f\n"
14651 " call call_rwsem_down_read_failed\n"
14652@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
14653 "1:\n\t"
14654 " mov %1,%2\n\t"
14655 " add %3,%2\n\t"
14656+
14657+#ifdef CONFIG_PAX_REFCOUNT
14658+ "jno 0f\n"
14659+ "sub %3,%2\n"
14660+ "int $4\n0:\n"
14661+ _ASM_EXTABLE(0b, 0b)
14662+#endif
14663+
14664 " jle 2f\n\t"
14665 LOCK_PREFIX " cmpxchg %2,%0\n\t"
14666 " jnz 1b\n\t"
14667@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
14668 long tmp;
14669 asm volatile("# beginning down_write\n\t"
14670 LOCK_PREFIX " xadd %1,(%2)\n\t"
14671+
14672+#ifdef CONFIG_PAX_REFCOUNT
14673+ "jno 0f\n"
14674+ "mov %1,(%2)\n"
14675+ "int $4\n0:\n"
14676+ _ASM_EXTABLE(0b, 0b)
14677+#endif
14678+
14679 /* adds 0xffff0001, returns the old value */
14680 " test %1,%1\n\t"
14681 /* was the count 0 before? */
14682@@ -141,6 +165,14 @@ static inline void __up_read(struct rw_semaphore *sem)
14683 long tmp;
14684 asm volatile("# beginning __up_read\n\t"
14685 LOCK_PREFIX " xadd %1,(%2)\n\t"
14686+
14687+#ifdef CONFIG_PAX_REFCOUNT
14688+ "jno 0f\n"
14689+ "mov %1,(%2)\n"
14690+ "int $4\n0:\n"
14691+ _ASM_EXTABLE(0b, 0b)
14692+#endif
14693+
14694 /* subtracts 1, returns the old value */
14695 " jns 1f\n\t"
14696 " call call_rwsem_wake\n" /* expects old value in %edx */
14697@@ -159,6 +191,14 @@ static inline void __up_write(struct rw_semaphore *sem)
14698 long tmp;
14699 asm volatile("# beginning __up_write\n\t"
14700 LOCK_PREFIX " xadd %1,(%2)\n\t"
14701+
14702+#ifdef CONFIG_PAX_REFCOUNT
14703+ "jno 0f\n"
14704+ "mov %1,(%2)\n"
14705+ "int $4\n0:\n"
14706+ _ASM_EXTABLE(0b, 0b)
14707+#endif
14708+
14709 /* subtracts 0xffff0001, returns the old value */
14710 " jns 1f\n\t"
14711 " call call_rwsem_wake\n" /* expects old value in %edx */
14712@@ -176,6 +216,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
14713 {
14714 asm volatile("# beginning __downgrade_write\n\t"
14715 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
14716+
14717+#ifdef CONFIG_PAX_REFCOUNT
14718+ "jno 0f\n"
14719+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
14720+ "int $4\n0:\n"
14721+ _ASM_EXTABLE(0b, 0b)
14722+#endif
14723+
14724 /*
14725 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
14726 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
14727@@ -194,7 +242,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
14728 */
14729 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
14730 {
14731- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
14732+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
14733+
14734+#ifdef CONFIG_PAX_REFCOUNT
14735+ "jno 0f\n"
14736+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
14737+ "int $4\n0:\n"
14738+ _ASM_EXTABLE(0b, 0b)
14739+#endif
14740+
14741 : "+m" (sem->count)
14742 : "er" (delta));
14743 }
14744@@ -204,7 +260,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
14745 */
14746 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
14747 {
14748- return delta + xadd(&sem->count, delta);
14749+ return delta + xadd_check_overflow(&sem->count, delta);
14750 }
14751
14752 #endif /* __KERNEL__ */
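The rwsem fast paths above are built on XADD, which hands back the pre-add value in its register operand, so "delta + xadd(...)" in rwsem_atomic_update() yields the new count (the patch only swaps in xadd_check_overflow, the overflow-trapping wrapper defined elsewhere in this patch). The underlying primitive, as a userspace x86 demo:

/*
 * lock xadd: *count += delta, and delta comes back holding the old
 * value of *count.
 */
#include <stdio.h>

static long xadd(long *count, long delta)
{
	asm volatile("lock; xadd %0, %1"
		     : "+r" (delta), "+m" (*count)
		     : : "memory", "cc");
	return delta;		/* old value of *count */
}

int main(void)
{
	long count = 0;
	long delta = 1;		/* the "adds 0x00000001" reader bias */

	long newval = delta + xadd(&count, delta);
	printf("new value %ld, count now %ld\n", newval, count);
	return 0;
}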
14753diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
14754index c48a950..c6d7468 100644
14755--- a/arch/x86/include/asm/segment.h
14756+++ b/arch/x86/include/asm/segment.h
14757@@ -64,10 +64,15 @@
14758 * 26 - ESPFIX small SS
14759 * 27 - per-cpu [ offset to per-cpu data area ]
14760 * 28 - stack_canary-20 [ for stack protector ]
14761- * 29 - unused
14762- * 30 - unused
14763+ * 29 - PCI BIOS CS
14764+ * 30 - PCI BIOS DS
14765 * 31 - TSS for double fault handler
14766 */
14767+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
14768+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
14769+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
14770+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
14771+
14772 #define GDT_ENTRY_TLS_MIN 6
14773 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
14774
14775@@ -79,6 +84,8 @@
14776
14777 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
14778
14779+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
14780+
14781 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
14782
14783 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
14784@@ -104,6 +111,12 @@
14785 #define __KERNEL_STACK_CANARY 0
14786 #endif
14787
14788+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
14789+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
14790+
14791+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
14792+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
14793+
14794 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
14795
14796 /*
14797@@ -141,7 +154,7 @@
14798 */
14799
14800 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
14801-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
14802+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
14803
14804
14805 #else
14806@@ -165,6 +178,8 @@
14807 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
14808 #define __USER32_DS __USER_DS
14809
14810+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
14811+
14812 #define GDT_ENTRY_TSS 8 /* needs two entries */
14813 #define GDT_ENTRY_LDT 10 /* needs two entries */
14814 #define GDT_ENTRY_TLS_MIN 12
14815@@ -185,6 +200,7 @@
14816 #endif
14817
14818 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
14819+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
14820 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
14821 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
14822 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
14823@@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
14824 {
14825 unsigned long __limit;
14826 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
14827- return __limit + 1;
14828+ return __limit;
14829 }
14830
14831 #endif /* !__ASSEMBLY__ */
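The new GDT slots above follow the standard selector encoding, selector = entry * 8 with the RPL in the low two bits; with GDT_ENTRY_KERNEL_BASE at 12 on i386, the PCI BIOS entries land in slots 29 and 30, matching the updated comment block at the top of this hunk:

/*
 * Selector arithmetic for the PCI BIOS GDT entries added above.
 */
#include <stdio.h>

#define GDT_ENTRY_KERNEL_BASE	12

#define GDT_ENTRY_PCIBIOS_CS	(GDT_ENTRY_KERNEL_BASE + 17)
#define GDT_ENTRY_PCIBIOS_DS	(GDT_ENTRY_KERNEL_BASE + 18)

#define __PCIBIOS_CS	(GDT_ENTRY_PCIBIOS_CS * 8)
#define __PCIBIOS_DS	(GDT_ENTRY_PCIBIOS_DS * 8)

int main(void)
{
	printf("__PCIBIOS_CS: entry %d -> %#x\n", GDT_ENTRY_PCIBIOS_CS, __PCIBIOS_CS);
	printf("__PCIBIOS_DS: entry %d -> %#x\n", GDT_ENTRY_PCIBIOS_DS, __PCIBIOS_DS);
	return 0;
}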
14832diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
14833index b073aae..39f9bdd 100644
14834--- a/arch/x86/include/asm/smp.h
14835+++ b/arch/x86/include/asm/smp.h
14836@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
14837 /* cpus sharing the last level cache: */
14838 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
14839 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
14840-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
14841+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
14842
14843 static inline struct cpumask *cpu_sibling_mask(int cpu)
14844 {
14845@@ -79,7 +79,7 @@ struct smp_ops {
14846
14847 void (*send_call_func_ipi)(const struct cpumask *mask);
14848 void (*send_call_func_single_ipi)(int cpu);
14849-};
14850+} __no_const;
14851
14852 /* Globals due to paravirt */
14853 extern void set_cpu_sibling_map(int cpu);
14854@@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
14855 extern int safe_smp_processor_id(void);
14856
14857 #elif defined(CONFIG_X86_64_SMP)
14858-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
14859-
14860-#define stack_smp_processor_id() \
14861-({ \
14862- struct thread_info *ti; \
14863- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
14864- ti->cpu; \
14865-})
14866+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
14867+#define stack_smp_processor_id() raw_smp_processor_id()
14868 #define safe_smp_processor_id() smp_processor_id()
14869
14870 #endif
14871diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
14872index 33692ea..350a534 100644
14873--- a/arch/x86/include/asm/spinlock.h
14874+++ b/arch/x86/include/asm/spinlock.h
14875@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
14876 static inline void arch_read_lock(arch_rwlock_t *rw)
14877 {
14878 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
14879+
14880+#ifdef CONFIG_PAX_REFCOUNT
14881+ "jno 0f\n"
14882+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
14883+ "int $4\n0:\n"
14884+ _ASM_EXTABLE(0b, 0b)
14885+#endif
14886+
14887 "jns 1f\n"
14888 "call __read_lock_failed\n\t"
14889 "1:\n"
14890@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
14891 static inline void arch_write_lock(arch_rwlock_t *rw)
14892 {
14893 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
14894+
14895+#ifdef CONFIG_PAX_REFCOUNT
14896+ "jno 0f\n"
14897+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
14898+ "int $4\n0:\n"
14899+ _ASM_EXTABLE(0b, 0b)
14900+#endif
14901+
14902 "jz 1f\n"
14903 "call __write_lock_failed\n\t"
14904 "1:\n"
14905@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
14906
14907 static inline void arch_read_unlock(arch_rwlock_t *rw)
14908 {
14909- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
14910+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
14911+
14912+#ifdef CONFIG_PAX_REFCOUNT
14913+ "jno 0f\n"
14914+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
14915+ "int $4\n0:\n"
14916+ _ASM_EXTABLE(0b, 0b)
14917+#endif
14918+
14919 :"+m" (rw->lock) : : "memory");
14920 }
14921
14922 static inline void arch_write_unlock(arch_rwlock_t *rw)
14923 {
14924- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
14925+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
14926+
14927+#ifdef CONFIG_PAX_REFCOUNT
14928+ "jno 0f\n"
14929+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
14930+ "int $4\n0:\n"
14931+ _ASM_EXTABLE(0b, 0b)
14932+#endif
14933+
14934 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
14935 }
14936
14937diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
14938index 6a99859..03cb807 100644
14939--- a/arch/x86/include/asm/stackprotector.h
14940+++ b/arch/x86/include/asm/stackprotector.h
14941@@ -47,7 +47,7 @@
14942 * head_32 for boot CPU and setup_per_cpu_areas() for others.
14943 */
14944 #define GDT_STACK_CANARY_INIT \
14945- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
14946+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
14947
14948 /*
14949 * Initialize the stackprotector canary value.
14950@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
14951
14952 static inline void load_stack_canary_segment(void)
14953 {
14954-#ifdef CONFIG_X86_32
14955+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14956 asm volatile ("mov %0, %%gs" : : "r" (0));
14957 #endif
14958 }
14959diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
14960index 70bbe39..4ae2bd4 100644
14961--- a/arch/x86/include/asm/stacktrace.h
14962+++ b/arch/x86/include/asm/stacktrace.h
14963@@ -11,28 +11,20 @@
14964
14965 extern int kstack_depth_to_print;
14966
14967-struct thread_info;
14968+struct task_struct;
14969 struct stacktrace_ops;
14970
14971-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
14972- unsigned long *stack,
14973- unsigned long bp,
14974- const struct stacktrace_ops *ops,
14975- void *data,
14976- unsigned long *end,
14977- int *graph);
14978+typedef unsigned long walk_stack_t(struct task_struct *task,
14979+ void *stack_start,
14980+ unsigned long *stack,
14981+ unsigned long bp,
14982+ const struct stacktrace_ops *ops,
14983+ void *data,
14984+ unsigned long *end,
14985+ int *graph);
14986
14987-extern unsigned long
14988-print_context_stack(struct thread_info *tinfo,
14989- unsigned long *stack, unsigned long bp,
14990- const struct stacktrace_ops *ops, void *data,
14991- unsigned long *end, int *graph);
14992-
14993-extern unsigned long
14994-print_context_stack_bp(struct thread_info *tinfo,
14995- unsigned long *stack, unsigned long bp,
14996- const struct stacktrace_ops *ops, void *data,
14997- unsigned long *end, int *graph);
14998+extern walk_stack_t print_context_stack;
14999+extern walk_stack_t print_context_stack_bp;
15000
15001 /* Generic stack tracer with callbacks */
15002
15003@@ -40,7 +32,7 @@ struct stacktrace_ops {
15004 void (*address)(void *data, unsigned long address, int reliable);
15005 /* On negative return stop dumping */
15006 int (*stack)(void *data, char *name);
15007- walk_stack_t walk_stack;
15008+ walk_stack_t *walk_stack;
15009 };
15010
15011 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
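The stacktrace change above switches walk_stack_t from a pointer typedef to a function-type typedef: "extern walk_stack_t print_context_stack;" then declares a function with exactly that signature (mismatches become compile errors), while struct members take walk_stack_t *. The idiom with a deliberately simplified signature:

/*
 * Function-type typedef: the extern declares a function, the struct
 * member is a pointer to it.
 */
#include <stdio.h>

typedef unsigned long walk_stack_t(unsigned long *stack, void *data);

extern walk_stack_t demo_walk;	/* a function declaration, not a pointer */

struct stacktrace_ops {
	walk_stack_t *walk_stack;	/* pointer member, as in the patch */
};

unsigned long demo_walk(unsigned long *stack, void *data)
{
	(void)data;
	return *stack;
}

int main(void)
{
	unsigned long frame = 0xdeadbeefUL;
	struct stacktrace_ops ops = { .walk_stack = demo_walk };

	printf("top of stack: %#lx\n", ops.walk_stack(&frame, NULL));
	return 0;
}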
15012diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
15013index 4ec45b3..a4f0a8a 100644
15014--- a/arch/x86/include/asm/switch_to.h
15015+++ b/arch/x86/include/asm/switch_to.h
15016@@ -108,7 +108,7 @@ do { \
15017 "call __switch_to\n\t" \
15018 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
15019 __switch_canary \
15020- "movq %P[thread_info](%%rsi),%%r8\n\t" \
15021+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
15022 "movq %%rax,%%rdi\n\t" \
15023 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
15024 "jnz ret_from_fork\n\t" \
15025@@ -119,7 +119,7 @@ do { \
15026 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
15027 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
15028 [_tif_fork] "i" (_TIF_FORK), \
15029- [thread_info] "i" (offsetof(struct task_struct, stack)), \
15030+ [thread_info] "m" (current_tinfo), \
15031 [current_task] "m" (current_task) \
15032 __switch_canary_iparam \
15033 : "memory", "cc" __EXTRA_CLOBBER)
15034diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
15035index 2d946e6..e453ec4 100644
15036--- a/arch/x86/include/asm/thread_info.h
15037+++ b/arch/x86/include/asm/thread_info.h
15038@@ -10,6 +10,7 @@
15039 #include <linux/compiler.h>
15040 #include <asm/page.h>
15041 #include <asm/types.h>
15042+#include <asm/percpu.h>
15043
15044 /*
15045 * low level task data that entry.S needs immediate access to
15046@@ -24,7 +25,6 @@ struct exec_domain;
15047 #include <linux/atomic.h>
15048
15049 struct thread_info {
15050- struct task_struct *task; /* main task structure */
15051 struct exec_domain *exec_domain; /* execution domain */
15052 __u32 flags; /* low level flags */
15053 __u32 status; /* thread synchronous flags */
15054@@ -34,19 +34,13 @@ struct thread_info {
15055 mm_segment_t addr_limit;
15056 struct restart_block restart_block;
15057 void __user *sysenter_return;
15058-#ifdef CONFIG_X86_32
15059- unsigned long previous_esp; /* ESP of the previous stack in
15060- case of nested (IRQ) stacks
15061- */
15062- __u8 supervisor_stack[0];
15063-#endif
15064+ unsigned long lowest_stack;
15065 unsigned int sig_on_uaccess_error:1;
15066 unsigned int uaccess_err:1; /* uaccess failed */
15067 };
15068
15069-#define INIT_THREAD_INFO(tsk) \
15070+#define INIT_THREAD_INFO \
15071 { \
15072- .task = &tsk, \
15073 .exec_domain = &default_exec_domain, \
15074 .flags = 0, \
15075 .cpu = 0, \
15076@@ -57,7 +51,7 @@ struct thread_info {
15077 }, \
15078 }
15079
15080-#define init_thread_info (init_thread_union.thread_info)
15081+#define init_thread_info (init_thread_union.stack)
15082 #define init_stack (init_thread_union.stack)
15083
15084 #else /* !__ASSEMBLY__ */
15085@@ -98,6 +92,7 @@ struct thread_info {
15086 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
15087 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
15088 #define TIF_X32 30 /* 32-bit native x86-64 binary */
15089+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
15090
15091 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
15092 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
15093@@ -122,17 +117,18 @@ struct thread_info {
15094 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
15095 #define _TIF_ADDR32 (1 << TIF_ADDR32)
15096 #define _TIF_X32 (1 << TIF_X32)
15097+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
15098
15099 /* work to do in syscall_trace_enter() */
15100 #define _TIF_WORK_SYSCALL_ENTRY \
15101 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
15102 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
15103- _TIF_NOHZ)
15104+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
15105
15106 /* work to do in syscall_trace_leave() */
15107 #define _TIF_WORK_SYSCALL_EXIT \
15108 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
15109- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
15110+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
15111
15112 /* work to do on interrupt/exception return */
15113 #define _TIF_WORK_MASK \
15114@@ -143,7 +139,7 @@ struct thread_info {
15115 /* work to do on any return to user space */
15116 #define _TIF_ALLWORK_MASK \
15117 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
15118- _TIF_NOHZ)
15119+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
15120
15121 /* Only used for 64 bit */
15122 #define _TIF_DO_NOTIFY_MASK \
15123@@ -159,45 +155,40 @@ struct thread_info {
15124
15125 #define PREEMPT_ACTIVE 0x10000000
15126
15127-#ifdef CONFIG_X86_32
15128-
15129-#define STACK_WARN (THREAD_SIZE/8)
15130-/*
15131- * macros/functions for gaining access to the thread information structure
15132- *
15133- * preempt_count needs to be 1 initially, until the scheduler is functional.
15134- */
15135-#ifndef __ASSEMBLY__
15136-
15137-
15138-/* how to get the current stack pointer from C */
15139-register unsigned long current_stack_pointer asm("esp") __used;
15140-
15141-/* how to get the thread information struct from C */
15142-static inline struct thread_info *current_thread_info(void)
15143-{
15144- return (struct thread_info *)
15145- (current_stack_pointer & ~(THREAD_SIZE - 1));
15146-}
15147-
15148-#else /* !__ASSEMBLY__ */
15149-
15150+#ifdef __ASSEMBLY__
15151 /* how to get the thread information struct from ASM */
15152 #define GET_THREAD_INFO(reg) \
15153- movl $-THREAD_SIZE, reg; \
15154- andl %esp, reg
15155+ mov PER_CPU_VAR(current_tinfo), reg
15156
15157 /* use this one if reg already contains %esp */
15158-#define GET_THREAD_INFO_WITH_ESP(reg) \
15159- andl $-THREAD_SIZE, reg
15160+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
15161+#else
15162+/* how to get the thread information struct from C */
15163+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
15164+
15165+static __always_inline struct thread_info *current_thread_info(void)
15166+{
15167+ return this_cpu_read_stable(current_tinfo);
15168+}
15169+#endif
15170+
15171+#ifdef CONFIG_X86_32
15172+
15173+#define STACK_WARN (THREAD_SIZE/8)
15174+/*
15175+ * macros/functions for gaining access to the thread information structure
15176+ *
15177+ * preempt_count needs to be 1 initially, until the scheduler is functional.
15178+ */
15179+#ifndef __ASSEMBLY__
15180+
15181+/* how to get the current stack pointer from C */
15182+register unsigned long current_stack_pointer asm("esp") __used;
15183
15184 #endif
15185
15186 #else /* X86_32 */
15187
15188-#include <asm/percpu.h>
15189-#define KERNEL_STACK_OFFSET (5*8)
15190-
15191 /*
15192 * macros/functions for gaining access to the thread information structure
15193 * preempt_count needs to be 1 initially, until the scheduler is functional.
15194@@ -205,27 +196,8 @@ static inline struct thread_info *current_thread_info(void)
15195 #ifndef __ASSEMBLY__
15196 DECLARE_PER_CPU(unsigned long, kernel_stack);
15197
15198-static inline struct thread_info *current_thread_info(void)
15199-{
15200- struct thread_info *ti;
15201- ti = (void *)(this_cpu_read_stable(kernel_stack) +
15202- KERNEL_STACK_OFFSET - THREAD_SIZE);
15203- return ti;
15204-}
15205-
15206-#else /* !__ASSEMBLY__ */
15207-
15208-/* how to get the thread information struct from ASM */
15209-#define GET_THREAD_INFO(reg) \
15210- movq PER_CPU_VAR(kernel_stack),reg ; \
15211- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
15212-
15213-/*
15214- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
15215- * a certain register (to be used in assembler memory operands).
15216- */
15217-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
15218-
15219+/* how to get the current stack pointer from C */
15220+register unsigned long current_stack_pointer asm("rsp") __used;
15221 #endif
15222
15223 #endif /* !X86_32 */
15224@@ -286,5 +258,12 @@ static inline bool is_ia32_task(void)
15225 extern void arch_task_cache_init(void);
15226 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
15227 extern void arch_release_task_struct(struct task_struct *tsk);
15228+
15229+#define __HAVE_THREAD_FUNCTIONS
15230+#define task_thread_info(task) (&(task)->tinfo)
15231+#define task_stack_page(task) ((task)->stack)
15232+#define setup_thread_stack(p, org) do {} while (0)
15233+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
15234+
15235 #endif
15236 #endif /* _ASM_X86_THREAD_INFO_H */
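This hunk removes the scheme the sketch below simulates: mainline locates thread_info by masking the stack pointer, since the structure sits at the bottom of the THREAD_SIZE-aligned kernel stack, which also means a deep enough stack overrun can overwrite it, addr_limit included. The patch instead embeds thread_info in task_struct (see the task_thread_info override above) and reaches it through the current_tinfo per-CPU pointer. A user-space simulation of the replaced masking trick:

    #define _POSIX_C_SOURCE 200112L
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define THREAD_SIZE 8192UL  /* kernel stacks are THREAD_SIZE-aligned */

    struct thread_info { int cpu; };

    int main(void)
    {
        void *stack;
        if (posix_memalign(&stack, THREAD_SIZE, THREAD_SIZE))
            return 1;
        struct thread_info *ti = stack;
        ti->cpu = 3;

        /* Any address inside the stack recovers thread_info by masking,
         * which is what the removed current_thread_info() did. */
        uintptr_t sp = (uintptr_t)stack + 5000;
        struct thread_info *found =
                (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
        printf("cpu = %d\n", found->cpu);   /* prints 3 */

        free(stack);
        return 0;
    }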
15237diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
15238index 1709801..0a60f2f 100644
15239--- a/arch/x86/include/asm/uaccess.h
15240+++ b/arch/x86/include/asm/uaccess.h
15241@@ -7,6 +7,7 @@
15242 #include <linux/compiler.h>
15243 #include <linux/thread_info.h>
15244 #include <linux/string.h>
15245+#include <linux/sched.h>
15246 #include <asm/asm.h>
15247 #include <asm/page.h>
15248 #include <asm/smap.h>
15249@@ -29,7 +30,12 @@
15250
15251 #define get_ds() (KERNEL_DS)
15252 #define get_fs() (current_thread_info()->addr_limit)
15253+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15254+void __set_fs(mm_segment_t x);
15255+void set_fs(mm_segment_t x);
15256+#else
15257 #define set_fs(x) (current_thread_info()->addr_limit = (x))
15258+#endif
15259
15260 #define segment_eq(a, b) ((a).seg == (b).seg)
15261
15262@@ -77,8 +83,33 @@
15263 * checks that the pointer is in the user space range - after calling
15264 * this function, memory access functions may still return -EFAULT.
15265 */
15266-#define access_ok(type, addr, size) \
15267- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15268+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15269+#define access_ok(type, addr, size) \
15270+({ \
15271+ long __size = size; \
15272+ unsigned long __addr = (unsigned long)addr; \
15273+ unsigned long __addr_ao = __addr & PAGE_MASK; \
15274+ unsigned long __end_ao = __addr + __size - 1; \
15275+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
15276+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
15277+ while(__addr_ao <= __end_ao) { \
15278+ char __c_ao; \
15279+ __addr_ao += PAGE_SIZE; \
15280+ if (__size > PAGE_SIZE) \
15281+ cond_resched(); \
15282+ if (__get_user(__c_ao, (char __user *)__addr)) \
15283+ break; \
15284+ if (type != VERIFY_WRITE) { \
15285+ __addr = __addr_ao; \
15286+ continue; \
15287+ } \
15288+ if (__put_user(__c_ao, (char __user *)__addr)) \
15289+ break; \
15290+ __addr = __addr_ao; \
15291+ } \
15292+ } \
15293+ __ret_ao; \
15294+})
15295
15296 /*
15297 * The exception table consists of pairs of addresses relative to the
15298@@ -189,13 +220,21 @@ extern int __get_user_bad(void);
15299 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
15300 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
15301
15302-
15303+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15304+#define __copyuser_seg "gs;"
15305+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
15306+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
15307+#else
15308+#define __copyuser_seg
15309+#define __COPYUSER_SET_ES
15310+#define __COPYUSER_RESTORE_ES
15311+#endif
15312
15313 #ifdef CONFIG_X86_32
15314 #define __put_user_asm_u64(x, addr, err, errret) \
15315 asm volatile(ASM_STAC "\n" \
15316- "1: movl %%eax,0(%2)\n" \
15317- "2: movl %%edx,4(%2)\n" \
15318+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
15319+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
15320 "3: " ASM_CLAC "\n" \
15321 ".section .fixup,\"ax\"\n" \
15322 "4: movl %3,%0\n" \
15323@@ -208,8 +247,8 @@ extern int __get_user_bad(void);
15324
15325 #define __put_user_asm_ex_u64(x, addr) \
15326 asm volatile(ASM_STAC "\n" \
15327- "1: movl %%eax,0(%1)\n" \
15328- "2: movl %%edx,4(%1)\n" \
15329+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
15330+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
15331 "3: " ASM_CLAC "\n" \
15332 _ASM_EXTABLE_EX(1b, 2b) \
15333 _ASM_EXTABLE_EX(2b, 3b) \
15334@@ -259,7 +298,7 @@ extern void __put_user_8(void);
15335 __typeof__(*(ptr)) __pu_val; \
15336 __chk_user_ptr(ptr); \
15337 might_fault(); \
15338- __pu_val = x; \
15339+ __pu_val = (x); \
15340 switch (sizeof(*(ptr))) { \
15341 case 1: \
15342 __put_user_x(1, __pu_val, ptr, __ret_pu); \
15343@@ -358,7 +397,7 @@ do { \
15344
15345 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
15346 asm volatile(ASM_STAC "\n" \
15347- "1: mov"itype" %2,%"rtype"1\n" \
15348+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
15349 "2: " ASM_CLAC "\n" \
15350 ".section .fixup,\"ax\"\n" \
15351 "3: mov %3,%0\n" \
15352@@ -366,7 +405,7 @@ do { \
15353 " jmp 2b\n" \
15354 ".previous\n" \
15355 _ASM_EXTABLE(1b, 3b) \
15356- : "=r" (err), ltype(x) \
15357+ : "=r" (err), ltype (x) \
15358 : "m" (__m(addr)), "i" (errret), "0" (err))
15359
15360 #define __get_user_size_ex(x, ptr, size) \
15361@@ -391,7 +430,7 @@ do { \
15362 } while (0)
15363
15364 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
15365- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
15366+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
15367 "2:\n" \
15368 _ASM_EXTABLE_EX(1b, 2b) \
15369 : ltype(x) : "m" (__m(addr)))
15370@@ -408,13 +447,24 @@ do { \
15371 int __gu_err; \
15372 unsigned long __gu_val; \
15373 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
15374- (x) = (__force __typeof__(*(ptr)))__gu_val; \
15375+ (x) = (__typeof__(*(ptr)))__gu_val; \
15376 __gu_err; \
15377 })
15378
15379 /* FIXME: this hack is definitely wrong -AK */
15380 struct __large_struct { unsigned long buf[100]; };
15381-#define __m(x) (*(struct __large_struct __user *)(x))
15382+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15383+#define ____m(x) \
15384+({ \
15385+ unsigned long ____x = (unsigned long)(x); \
15386+ if (____x < PAX_USER_SHADOW_BASE) \
15387+ ____x += PAX_USER_SHADOW_BASE; \
15388+ (void __user *)____x; \
15389+})
15390+#else
15391+#define ____m(x) (x)
15392+#endif
15393+#define __m(x) (*(struct __large_struct __user *)____m(x))
15394
15395 /*
15396 * Tell gcc we read from memory instead of writing: this is because
15397@@ -423,7 +473,7 @@ struct __large_struct { unsigned long buf[100]; };
15398 */
15399 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
15400 asm volatile(ASM_STAC "\n" \
15401- "1: mov"itype" %"rtype"1,%2\n" \
15402+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
15403 "2: " ASM_CLAC "\n" \
15404 ".section .fixup,\"ax\"\n" \
15405 "3: mov %3,%0\n" \
15406@@ -431,10 +481,10 @@ struct __large_struct { unsigned long buf[100]; };
15407 ".previous\n" \
15408 _ASM_EXTABLE(1b, 3b) \
15409 : "=r"(err) \
15410- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
15411+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
15412
15413 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
15414- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
15415+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
15416 "2:\n" \
15417 _ASM_EXTABLE_EX(1b, 2b) \
15418 : : ltype(x), "m" (__m(addr)))
15419@@ -473,8 +523,12 @@ struct __large_struct { unsigned long buf[100]; };
15420 * On error, the variable @x is set to zero.
15421 */
15422
15423+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15424+#define __get_user(x, ptr) get_user((x), (ptr))
15425+#else
15426 #define __get_user(x, ptr) \
15427 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
15428+#endif
15429
15430 /**
15431 * __put_user: - Write a simple value into user space, with less checking.
15432@@ -496,8 +550,12 @@ struct __large_struct { unsigned long buf[100]; };
15433 * Returns zero on success, or -EFAULT on error.
15434 */
15435
15436+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15437+#define __put_user(x, ptr) put_user((x), (ptr))
15438+#else
15439 #define __put_user(x, ptr) \
15440 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
15441+#endif
15442
15443 #define __get_user_unaligned __get_user
15444 #define __put_user_unaligned __put_user
15445@@ -515,7 +573,7 @@ struct __large_struct { unsigned long buf[100]; };
15446 #define get_user_ex(x, ptr) do { \
15447 unsigned long __gue_val; \
15448 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
15449- (x) = (__force __typeof__(*(ptr)))__gue_val; \
15450+ (x) = (__typeof__(*(ptr)))__gue_val; \
15451 } while (0)
15452
15453 #define put_user_try uaccess_try
15454@@ -532,8 +590,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
15455 extern __must_check long strlen_user(const char __user *str);
15456 extern __must_check long strnlen_user(const char __user *str, long n);
15457
15458-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
15459-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
15460+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
15461+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
15462
15463 /*
15464 * movsl can be slow when source and dest are not both 8-byte aligned
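Under PAX_MEMORY_UDEREF the rewritten access_ok() does more than a range check: when a request crosses a page boundary it walks the range one page at a time, reading a byte from each page (and writing it back for VERIFY_WRITE) so faults are taken eagerly, with a cond_resched() for large ranges. A simplified user-space mirror of the read-side walk, where probe_byte stands in for __get_user, which in the kernel is a real user access that can fault:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    /* Stand-in for __get_user(): here it always succeeds. */
    static int probe_byte(const volatile char *p)
    {
        char c = *p;
        (void)c;
        return 0;
    }

    /* Touch one byte per page of [addr, addr+size), a simplified mirror
     * of the patched access_ok() page walk (VERIFY_READ case). */
    static int range_ok(const char *addr, unsigned long size)
    {
        unsigned long a = (unsigned long)addr;
        unsigned long page = a & PAGE_MASK;
        unsigned long end = a + size - 1;

        while (page <= end) {
            if (probe_byte((const char *)a))
                return 0;      /* fault: let the real access handle it */
            page += PAGE_SIZE;
            a = page;          /* continue at the next page head */
        }
        return 1;
    }

    int main(void)
    {
        static char buf[3 * 4096];
        printf("%d\n", range_ok(buf + 100, sizeof(buf) - 200));
        return 0;
    }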
15465diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
15466index 7f760a9..04b1c65 100644
15467--- a/arch/x86/include/asm/uaccess_32.h
15468+++ b/arch/x86/include/asm/uaccess_32.h
15469@@ -11,15 +11,15 @@
15470 #include <asm/page.h>
15471
15472 unsigned long __must_check __copy_to_user_ll
15473- (void __user *to, const void *from, unsigned long n);
15474+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
15475 unsigned long __must_check __copy_from_user_ll
15476- (void *to, const void __user *from, unsigned long n);
15477+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15478 unsigned long __must_check __copy_from_user_ll_nozero
15479- (void *to, const void __user *from, unsigned long n);
15480+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15481 unsigned long __must_check __copy_from_user_ll_nocache
15482- (void *to, const void __user *from, unsigned long n);
15483+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15484 unsigned long __must_check __copy_from_user_ll_nocache_nozero
15485- (void *to, const void __user *from, unsigned long n);
15486+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15487
15488 /**
15489 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
15490@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
15491 static __always_inline unsigned long __must_check
15492 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
15493 {
15494+ if ((long)n < 0)
15495+ return n;
15496+
15497+ check_object_size(from, n, true);
15498+
15499 if (__builtin_constant_p(n)) {
15500 unsigned long ret;
15501
15502@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
15503 __copy_to_user(void __user *to, const void *from, unsigned long n)
15504 {
15505 might_fault();
15506+
15507 return __copy_to_user_inatomic(to, from, n);
15508 }
15509
15510 static __always_inline unsigned long
15511 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
15512 {
15513+ if ((long)n < 0)
15514+ return n;
15515+
15516 /* Avoid zeroing the tail if the copy fails..
15517 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
15518 * but as the zeroing behaviour is only significant when n is not
15519@@ -137,6 +146,12 @@ static __always_inline unsigned long
15520 __copy_from_user(void *to, const void __user *from, unsigned long n)
15521 {
15522 might_fault();
15523+
15524+ if ((long)n < 0)
15525+ return n;
15526+
15527+ check_object_size(to, n, false);
15528+
15529 if (__builtin_constant_p(n)) {
15530 unsigned long ret;
15531
15532@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
15533 const void __user *from, unsigned long n)
15534 {
15535 might_fault();
15536+
15537+ if ((long)n < 0)
15538+ return n;
15539+
15540 if (__builtin_constant_p(n)) {
15541 unsigned long ret;
15542
15543@@ -181,15 +200,19 @@ static __always_inline unsigned long
15544 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
15545 unsigned long n)
15546 {
15547- return __copy_from_user_ll_nocache_nozero(to, from, n);
15548+ if ((long)n < 0)
15549+ return n;
15550+
15551+ return __copy_from_user_ll_nocache_nozero(to, from, n);
15552 }
15553
15554-unsigned long __must_check copy_to_user(void __user *to,
15555- const void *from, unsigned long n);
15556-unsigned long __must_check _copy_from_user(void *to,
15557- const void __user *from,
15558- unsigned long n);
15559-
15560+extern void copy_to_user_overflow(void)
15561+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
15562+ __compiletime_error("copy_to_user() buffer size is not provably correct")
15563+#else
15564+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
15565+#endif
15566+;
15567
15568 extern void copy_from_user_overflow(void)
15569 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
15570@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
15571 #endif
15572 ;
15573
15574-static inline unsigned long __must_check copy_from_user(void *to,
15575- const void __user *from,
15576- unsigned long n)
15577+/**
15578+ * copy_to_user: - Copy a block of data into user space.
15579+ * @to: Destination address, in user space.
15580+ * @from: Source address, in kernel space.
15581+ * @n: Number of bytes to copy.
15582+ *
15583+ * Context: User context only. This function may sleep.
15584+ *
15585+ * Copy data from kernel space to user space.
15586+ *
15587+ * Returns number of bytes that could not be copied.
15588+ * On success, this will be zero.
15589+ */
15590+static inline unsigned long __must_check
15591+copy_to_user(void __user *to, const void *from, unsigned long n)
15592 {
15593- int sz = __compiletime_object_size(to);
15594+ size_t sz = __compiletime_object_size(from);
15595
15596- if (likely(sz == -1 || sz >= n))
15597- n = _copy_from_user(to, from, n);
15598- else
15599+ if (unlikely(sz != (size_t)-1 && sz < n))
15600+ copy_to_user_overflow();
15601+ else if (access_ok(VERIFY_WRITE, to, n))
15602+ n = __copy_to_user(to, from, n);
15603+ return n;
15604+}
15605+
15606+/**
15607+ * copy_from_user: - Copy a block of data from user space.
15608+ * @to: Destination address, in kernel space.
15609+ * @from: Source address, in user space.
15610+ * @n: Number of bytes to copy.
15611+ *
15612+ * Context: User context only. This function may sleep.
15613+ *
15614+ * Copy data from user space to kernel space.
15615+ *
15616+ * Returns number of bytes that could not be copied.
15617+ * On success, this will be zero.
15618+ *
15619+ * If some data could not be copied, this function will pad the copied
15620+ * data to the requested size using zero bytes.
15621+ */
15622+static inline unsigned long __must_check
15623+copy_from_user(void *to, const void __user *from, unsigned long n)
15624+{
15625+ size_t sz = __compiletime_object_size(to);
15626+
15627+ check_object_size(to, n, false);
15628+
15629+ if (unlikely(sz != (size_t)-1 && sz < n))
15630 copy_from_user_overflow();
15631-
15632+ else if (access_ok(VERIFY_READ, from, n))
15633+ n = __copy_from_user(to, from, n);
15634+ else if ((long)n > 0)
15635+ memset(to, 0, n);
15636 return n;
15637 }
15638
15639diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
15640index 142810c..747941a 100644
15641--- a/arch/x86/include/asm/uaccess_64.h
15642+++ b/arch/x86/include/asm/uaccess_64.h
15643@@ -10,6 +10,9 @@
15644 #include <asm/alternative.h>
15645 #include <asm/cpufeature.h>
15646 #include <asm/page.h>
15647+#include <asm/pgtable.h>
15648+
15649+#define set_fs(x) (current_thread_info()->addr_limit = (x))
15650
15651 /*
15652 * Copy To/From Userspace
15653@@ -17,13 +20,13 @@
15654
15655 /* Handles exceptions in both to and from, but doesn't do access_ok */
15656 __must_check unsigned long
15657-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
15658+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
15659 __must_check unsigned long
15660-copy_user_generic_string(void *to, const void *from, unsigned len);
15661+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
15662 __must_check unsigned long
15663-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
15664+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
15665
15666-static __always_inline __must_check unsigned long
15667+static __always_inline __must_check __size_overflow(3) unsigned long
15668 copy_user_generic(void *to, const void *from, unsigned len)
15669 {
15670 unsigned ret;
15671@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
15672 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
15673 "=d" (len)),
15674 "1" (to), "2" (from), "3" (len)
15675- : "memory", "rcx", "r8", "r9", "r10", "r11");
15676+ : "memory", "rcx", "r8", "r9", "r11");
15677 return ret;
15678 }
15679
15680+static __always_inline __must_check unsigned long
15681+__copy_to_user(void __user *to, const void *from, unsigned long len) __size_overflow(3);
15682+static __always_inline __must_check unsigned long
15683+__copy_from_user(void *to, const void __user *from, unsigned long len) __size_overflow(3);
15684 __must_check unsigned long
15685-_copy_to_user(void __user *to, const void *from, unsigned len);
15686-__must_check unsigned long
15687-_copy_from_user(void *to, const void __user *from, unsigned len);
15688-__must_check unsigned long
15689-copy_in_user(void __user *to, const void __user *from, unsigned len);
15690+copy_in_user(void __user *to, const void __user *from, unsigned long len) __size_overflow(3);
15691+
15692+extern void copy_to_user_overflow(void)
15693+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
15694+ __compiletime_error("copy_to_user() buffer size is not provably correct")
15695+#else
15696+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
15697+#endif
15698+;
15699+
15700+extern void copy_from_user_overflow(void)
15701+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
15702+ __compiletime_error("copy_from_user() buffer size is not provably correct")
15703+#else
15704+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
15705+#endif
15706+;
15707
15708 static inline unsigned long __must_check copy_from_user(void *to,
15709 const void __user *from,
15710 unsigned long n)
15711 {
15712- int sz = __compiletime_object_size(to);
15713-
15714 might_fault();
15715- if (likely(sz == -1 || sz >= n))
15716- n = _copy_from_user(to, from, n);
15717-#ifdef CONFIG_DEBUG_VM
15718- else
15719- WARN(1, "Buffer overflow detected!\n");
15720-#endif
15721+
15722+ check_object_size(to, n, false);
15723+
15724+ if (access_ok(VERIFY_READ, from, n))
15725+ n = __copy_from_user(to, from, n);
15726+ else if (n < INT_MAX)
15727+ memset(to, 0, n);
15728 return n;
15729 }
15730
15731 static __always_inline __must_check
15732-int copy_to_user(void __user *dst, const void *src, unsigned size)
15733+int copy_to_user(void __user *dst, const void *src, unsigned long size)
15734 {
15735 might_fault();
15736
15737- return _copy_to_user(dst, src, size);
15738+ if (access_ok(VERIFY_WRITE, dst, size))
15739+ size = __copy_to_user(dst, src, size);
15740+ return size;
15741 }
15742
15743 static __always_inline __must_check
15744-int __copy_from_user(void *dst, const void __user *src, unsigned size)
15745+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
15746 {
15747- int ret = 0;
15748+ size_t sz = __compiletime_object_size(dst);
15749+ unsigned ret = 0;
15750
15751 might_fault();
15752+
15753+ if (size > INT_MAX)
15754+ return size;
15755+
15756+ check_object_size(dst, size, false);
15757+
15758+#ifdef CONFIG_PAX_MEMORY_UDEREF
15759+ if (!__access_ok(VERIFY_READ, src, size))
15760+ return size;
15761+#endif
15762+
15763+ if (unlikely(sz != (size_t)-1 && sz < size)) {
15764+ copy_from_user_overflow();
15765+ return size;
15766+ }
15767+
15768 if (!__builtin_constant_p(size))
15769- return copy_user_generic(dst, (__force void *)src, size);
15770+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
15771 switch (size) {
15772- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
15773+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
15774 ret, "b", "b", "=q", 1);
15775 return ret;
15776- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
15777+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
15778 ret, "w", "w", "=r", 2);
15779 return ret;
15780- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
15781+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
15782 ret, "l", "k", "=r", 4);
15783 return ret;
15784- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
15785+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
15786 ret, "q", "", "=r", 8);
15787 return ret;
15788 case 10:
15789- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
15790+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
15791 ret, "q", "", "=r", 10);
15792 if (unlikely(ret))
15793 return ret;
15794 __get_user_asm(*(u16 *)(8 + (char *)dst),
15795- (u16 __user *)(8 + (char __user *)src),
15796+ (const u16 __user *)(8 + (const char __user *)src),
15797 ret, "w", "w", "=r", 2);
15798 return ret;
15799 case 16:
15800- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
15801+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
15802 ret, "q", "", "=r", 16);
15803 if (unlikely(ret))
15804 return ret;
15805 __get_user_asm(*(u64 *)(8 + (char *)dst),
15806- (u64 __user *)(8 + (char __user *)src),
15807+ (const u64 __user *)(8 + (const char __user *)src),
15808 ret, "q", "", "=r", 8);
15809 return ret;
15810 default:
15811- return copy_user_generic(dst, (__force void *)src, size);
15812+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
15813 }
15814 }
15815
15816 static __always_inline __must_check
15817-int __copy_to_user(void __user *dst, const void *src, unsigned size)
15818+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
15819 {
15820- int ret = 0;
15821+ size_t sz = __compiletime_object_size(src);
15822+ unsigned ret = 0;
15823
15824 might_fault();
15825+
15826+ if (size > INT_MAX)
15827+ return size;
15828+
15829+ check_object_size(src, size, true);
15830+
15831+#ifdef CONFIG_PAX_MEMORY_UDEREF
15832+ if (!__access_ok(VERIFY_WRITE, dst, size))
15833+ return size;
15834+#endif
15835+
15836+ if (unlikely(sz != (size_t)-1 && sz < size)) {
15837+ copy_to_user_overflow();
15838+ return size;
15839+ }
15840+
15841 if (!__builtin_constant_p(size))
15842- return copy_user_generic((__force void *)dst, src, size);
15843+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
15844 switch (size) {
15845- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
15846+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
15847 ret, "b", "b", "iq", 1);
15848 return ret;
15849- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
15850+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
15851 ret, "w", "w", "ir", 2);
15852 return ret;
15853- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
15854+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
15855 ret, "l", "k", "ir", 4);
15856 return ret;
15857- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
15858+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
15859 ret, "q", "", "er", 8);
15860 return ret;
15861 case 10:
15862- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
15863+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
15864 ret, "q", "", "er", 10);
15865 if (unlikely(ret))
15866 return ret;
15867 asm("":::"memory");
15868- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
15869+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
15870 ret, "w", "w", "ir", 2);
15871 return ret;
15872 case 16:
15873- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
15874+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
15875 ret, "q", "", "er", 16);
15876 if (unlikely(ret))
15877 return ret;
15878 asm("":::"memory");
15879- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
15880+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
15881 ret, "q", "", "er", 8);
15882 return ret;
15883 default:
15884- return copy_user_generic((__force void *)dst, src, size);
15885+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
15886 }
15887 }
15888
15889 static __always_inline __must_check
15890-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
15891+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
15892 {
15893- int ret = 0;
15894+ unsigned ret = 0;
15895
15896 might_fault();
15897+
15898+ if (size > INT_MAX)
15899+ return size;
15900+
15901+#ifdef CONFIG_PAX_MEMORY_UDEREF
15902+ if (!__access_ok(VERIFY_READ, src, size))
15903+ return size;
15904+ if (!__access_ok(VERIFY_WRITE, dst, size))
15905+ return size;
15906+#endif
15907+
15908 if (!__builtin_constant_p(size))
15909- return copy_user_generic((__force void *)dst,
15910- (__force void *)src, size);
15911+ return copy_user_generic((__force_kernel void *)____m(dst),
15912+ (__force_kernel const void *)____m(src), size);
15913 switch (size) {
15914 case 1: {
15915 u8 tmp;
15916- __get_user_asm(tmp, (u8 __user *)src,
15917+ __get_user_asm(tmp, (const u8 __user *)src,
15918 ret, "b", "b", "=q", 1);
15919 if (likely(!ret))
15920 __put_user_asm(tmp, (u8 __user *)dst,
15921@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
15922 }
15923 case 2: {
15924 u16 tmp;
15925- __get_user_asm(tmp, (u16 __user *)src,
15926+ __get_user_asm(tmp, (const u16 __user *)src,
15927 ret, "w", "w", "=r", 2);
15928 if (likely(!ret))
15929 __put_user_asm(tmp, (u16 __user *)dst,
15930@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
15931
15932 case 4: {
15933 u32 tmp;
15934- __get_user_asm(tmp, (u32 __user *)src,
15935+ __get_user_asm(tmp, (const u32 __user *)src,
15936 ret, "l", "k", "=r", 4);
15937 if (likely(!ret))
15938 __put_user_asm(tmp, (u32 __user *)dst,
15939@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
15940 }
15941 case 8: {
15942 u64 tmp;
15943- __get_user_asm(tmp, (u64 __user *)src,
15944+ __get_user_asm(tmp, (const u64 __user *)src,
15945 ret, "q", "", "=r", 8);
15946 if (likely(!ret))
15947 __put_user_asm(tmp, (u64 __user *)dst,
15948@@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
15949 return ret;
15950 }
15951 default:
15952- return copy_user_generic((__force void *)dst,
15953- (__force void *)src, size);
15954+ return copy_user_generic((__force_kernel void *)____m(dst),
15955+ (__force_kernel const void *)____m(src), size);
15956 }
15957 }
15958
15959 static __must_check __always_inline int
15960-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
15961+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
15962 {
15963- return copy_user_generic(dst, (__force const void *)src, size);
15964+ if (size > INT_MAX)
15965+ return size;
15966+
15967+#ifdef CONFIG_PAX_MEMORY_UDEREF
15968+ if (!__access_ok(VERIFY_READ, src, size))
15969+ return size;
15970+#endif
15971+
15972+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
15973 }
15974
15975-static __must_check __always_inline int
15976-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
15977+static __must_check __always_inline unsigned long
15978+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
15979 {
15980- return copy_user_generic((__force void *)dst, src, size);
15981+ if (size > INT_MAX)
15982+ return size;
15983+
15984+#ifdef CONFIG_PAX_MEMORY_UDEREF
15985+ if (!__access_ok(VERIFY_WRITE, dst, size))
15986+ return size;
15987+#endif
15988+
15989+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
15990 }
15991
15992-extern long __copy_user_nocache(void *dst, const void __user *src,
15993- unsigned size, int zerorest);
15994+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
15995+ unsigned long size, int zerorest) __size_overflow(3);
15996
15997-static inline int
15998-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
15999+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
16000 {
16001 might_sleep();
16002+
16003+ if (size > INT_MAX)
16004+ return size;
16005+
16006+#ifdef CONFIG_PAX_MEMORY_UDEREF
16007+ if (!__access_ok(VERIFY_READ, src, size))
16008+ return size;
16009+#endif
16010+
16011 return __copy_user_nocache(dst, src, size, 1);
16012 }
16013
16014-static inline int
16015-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
16016- unsigned size)
16017+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
16018+ unsigned long size)
16019 {
16020+ if (size > INT_MAX)
16021+ return size;
16022+
16023+#ifdef CONFIG_PAX_MEMORY_UDEREF
16024+ if (!__access_ok(VERIFY_READ, src, size))
16025+ return size;
16026+#endif
16027+
16028 return __copy_user_nocache(dst, src, size, 0);
16029 }
16030
16031-unsigned long
16032-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
16033+extern unsigned long
16034+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
16035
16036 #endif /* _ASM_X86_UACCESS_64_H */
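Throughout this header the helpers move from int/unsigned sizes to unsigned long and reject anything above INT_MAX up front (the 32-bit variants use the equivalent (long)n < 0 test). That closes the classic signedness hole where a negative length, once converted to an unsigned count, becomes a near-infinite copy. The failure mode and the guard, in a few lines:

    #include <stdio.h>
    #include <limits.h>
    #include <string.h>

    static unsigned long guarded_copy(void *to, const void *from,
                                      unsigned long size)
    {
        if (size > INT_MAX)
            return size;    /* refuse: report everything as uncopied */
        memcpy(to, from, size);
        return 0;
    }

    int main(void)
    {
        char dst[16], src[16] = "ok";
        int user_len = -1;                          /* attacker-chosen */
        unsigned long n = (unsigned long)user_len;  /* huge after cast */
        printf("left: %lu\n", guarded_copy(dst, src, n)); /* refused */
        printf("left: %lu\n", guarded_copy(dst, src, 3)); /* copied */
        return 0;
    }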
16037diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
16038index 5b238981..77fdd78 100644
16039--- a/arch/x86/include/asm/word-at-a-time.h
16040+++ b/arch/x86/include/asm/word-at-a-time.h
16041@@ -11,7 +11,7 @@
16042 * and shift, for example.
16043 */
16044 struct word_at_a_time {
16045- const unsigned long one_bits, high_bits;
16046+ unsigned long one_bits, high_bits;
16047 };
16048
16049 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
16050diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
16051index 5769349..a3d3e2a 100644
16052--- a/arch/x86/include/asm/x86_init.h
16053+++ b/arch/x86/include/asm/x86_init.h
16054@@ -141,7 +141,7 @@ struct x86_init_ops {
16055 struct x86_init_timers timers;
16056 struct x86_init_iommu iommu;
16057 struct x86_init_pci pci;
16058-};
16059+} __no_const;
16060
16061 /**
16062 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
16063@@ -152,7 +152,7 @@ struct x86_cpuinit_ops {
16064 void (*setup_percpu_clockev)(void);
16065 void (*early_percpu_clock_init)(void);
16066 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
16067-};
16068+} __no_const;
16069
16070 /**
16071 * struct x86_platform_ops - platform specific runtime functions
16072@@ -178,7 +178,7 @@ struct x86_platform_ops {
16073 void (*save_sched_clock_state)(void);
16074 void (*restore_sched_clock_state)(void);
16075 void (*apic_post_init)(void);
16076-};
16077+} __no_const;
16078
16079 struct pci_dev;
16080
16081@@ -187,14 +187,14 @@ struct x86_msi_ops {
16082 void (*teardown_msi_irq)(unsigned int irq);
16083 void (*teardown_msi_irqs)(struct pci_dev *dev);
16084 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
16085-};
16086+} __no_const;
16087
16088 struct x86_io_apic_ops {
16089 void (*init) (void);
16090 unsigned int (*read) (unsigned int apic, unsigned int reg);
16091 void (*write) (unsigned int apic, unsigned int reg, unsigned int value);
16092 void (*modify)(unsigned int apic, unsigned int reg, unsigned int value);
16093-};
16094+} __no_const;
16095
16096 extern struct x86_init_ops x86_init;
16097 extern struct x86_cpuinit_ops x86_cpuinit;
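Context for __no_const: PaX's constify gcc plugin treats any struct consisting solely of function pointers as implicitly const, which is why the apic instances later in this patch need nothing more than __read_only. The x86_init/x86_cpuinit/x86_msi/x86_io_apic ops above are legitimately rewritten at boot, so __no_const opts them out. The underlying protection is what plain C const buys: the table lands in .rodata, out of reach of a stray kernel write. A miniature version with an invented ops table:

    #include <stdio.h>

    struct msi_ops {
        int  (*setup)(int irq);
        void (*teardown)(int irq);
    };

    static int  my_setup(int irq)    { return printf("setup %d\n", irq); }
    static void my_teardown(int irq) { printf("teardown %d\n", irq); }

    /* const puts the table in .rodata, so an attacker with an
     * arbitrary-write primitive cannot simply redirect ->setup. */
    static const struct msi_ops ops = {
        .setup    = my_setup,
        .teardown = my_teardown,
    };

    int main(void)
    {
        ops.setup(5);
        /* ops.setup = evil;   -- rejected at compile time */
        ops.teardown(5);
        return 0;
    }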
16098diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
16099index 0415cda..b43d877 100644
16100--- a/arch/x86/include/asm/xsave.h
16101+++ b/arch/x86/include/asm/xsave.h
16102@@ -71,7 +71,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16103 return -EFAULT;
16104
16105 __asm__ __volatile__(ASM_STAC "\n"
16106- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
16107+ "1:"
16108+ __copyuser_seg
16109+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
16110 "2: " ASM_CLAC "\n"
16111 ".section .fixup,\"ax\"\n"
16112 "3: movl $-1,%[err]\n"
16113@@ -87,12 +89,14 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16114 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
16115 {
16116 int err;
16117- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
16118+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
16119 u32 lmask = mask;
16120 u32 hmask = mask >> 32;
16121
16122 __asm__ __volatile__(ASM_STAC "\n"
16123- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16124+ "1:"
16125+ __copyuser_seg
16126+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16127 "2: " ASM_CLAC "\n"
16128 ".section .fixup,\"ax\"\n"
16129 "3: movl $-1,%[err]\n"
16130diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
16131index bbae024..e1528f9 100644
16132--- a/arch/x86/include/uapi/asm/e820.h
16133+++ b/arch/x86/include/uapi/asm/e820.h
16134@@ -63,7 +63,7 @@ struct e820map {
16135 #define ISA_START_ADDRESS 0xa0000
16136 #define ISA_END_ADDRESS 0x100000
16137
16138-#define BIOS_BEGIN 0x000a0000
16139+#define BIOS_BEGIN 0x000c0000
16140 #define BIOS_END 0x00100000
16141
16142 #define BIOS_ROM_BASE 0xffe00000
16143diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
16144index 34e923a..0c6bb6e 100644
16145--- a/arch/x86/kernel/Makefile
16146+++ b/arch/x86/kernel/Makefile
16147@@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
16148 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
16149 obj-$(CONFIG_IRQ_WORK) += irq_work.o
16150 obj-y += probe_roms.o
16151-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
16152+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
16153 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
16154 obj-y += syscall_$(BITS).o
16155 obj-$(CONFIG_X86_64) += vsyscall_64.o
16156diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
16157index bacf4b0..4ede72e 100644
16158--- a/arch/x86/kernel/acpi/boot.c
16159+++ b/arch/x86/kernel/acpi/boot.c
16160@@ -1358,7 +1358,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
16161 * If your system is blacklisted here, but you find that acpi=force
16162 * works for you, please contact linux-acpi@vger.kernel.org
16163 */
16164-static struct dmi_system_id __initdata acpi_dmi_table[] = {
16165+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
16166 /*
16167 * Boxes that need ACPI disabled
16168 */
16169@@ -1433,7 +1433,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
16170 };
16171
16172 /* second table for DMI checks that should run after early-quirks */
16173-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
16174+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
16175 /*
16176 * HP laptops which use a DSDT reporting as HP/SB400/10000,
16177 * which includes some code which overrides all temperature
16178diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
16179index d5e0d71..6533e08 100644
16180--- a/arch/x86/kernel/acpi/sleep.c
16181+++ b/arch/x86/kernel/acpi/sleep.c
16182@@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
16183 #else /* CONFIG_64BIT */
16184 #ifdef CONFIG_SMP
16185 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
16186+
16187+ pax_open_kernel();
16188 early_gdt_descr.address =
16189 (unsigned long)get_cpu_gdt_table(smp_processor_id());
16190+ pax_close_kernel();
16191+
16192 initial_gs = per_cpu_offset(smp_processor_id());
16193 #endif
16194 initial_code = (unsigned long)wakeup_long64;
16195diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
16196index 13ab720..95d5442 100644
16197--- a/arch/x86/kernel/acpi/wakeup_32.S
16198+++ b/arch/x86/kernel/acpi/wakeup_32.S
16199@@ -30,13 +30,11 @@ wakeup_pmode_return:
16200 # and restore the stack ... but you need gdt for this to work
16201 movl saved_context_esp, %esp
16202
16203- movl %cs:saved_magic, %eax
16204- cmpl $0x12345678, %eax
16205+ cmpl $0x12345678, saved_magic
16206 jne bogus_magic
16207
16208 # jump to place where we left off
16209- movl saved_eip, %eax
16210- jmp *%eax
16211+ jmp *(saved_eip)
16212
16213 bogus_magic:
16214 jmp bogus_magic
16215diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
16216index ef5ccca..bd83949 100644
16217--- a/arch/x86/kernel/alternative.c
16218+++ b/arch/x86/kernel/alternative.c
16219@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
16220 */
16221 for (a = start; a < end; a++) {
16222 instr = (u8 *)&a->instr_offset + a->instr_offset;
16223+
16224+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16225+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16226+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
16227+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16228+#endif
16229+
16230 replacement = (u8 *)&a->repl_offset + a->repl_offset;
16231 BUG_ON(a->replacementlen > a->instrlen);
16232 BUG_ON(a->instrlen > sizeof(insnbuf));
16233@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
16234 for (poff = start; poff < end; poff++) {
16235 u8 *ptr = (u8 *)poff + *poff;
16236
16237+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16238+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16239+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16240+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16241+#endif
16242+
16243 if (!*poff || ptr < text || ptr >= text_end)
16244 continue;
16245 /* turn DS segment override prefix into lock prefix */
16246- if (*ptr == 0x3e)
16247+ if (*ktla_ktva(ptr) == 0x3e)
16248 text_poke(ptr, ((unsigned char []){0xf0}), 1);
16249 }
16250 mutex_unlock(&text_mutex);
16251@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
16252 for (poff = start; poff < end; poff++) {
16253 u8 *ptr = (u8 *)poff + *poff;
16254
16255+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16256+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16257+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16258+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16259+#endif
16260+
16261 if (!*poff || ptr < text || ptr >= text_end)
16262 continue;
16263 /* turn lock prefix into DS segment override prefix */
16264- if (*ptr == 0xf0)
16265+ if (*ktla_ktva(ptr) == 0xf0)
16266 text_poke(ptr, ((unsigned char []){0x3E}), 1);
16267 }
16268 mutex_unlock(&text_mutex);
16269@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
16270
16271 BUG_ON(p->len > MAX_PATCH_LEN);
16272 /* prep the buffer with the original instructions */
16273- memcpy(insnbuf, p->instr, p->len);
16274+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
16275 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
16276 (unsigned long)p->instr, p->len);
16277
16278@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
16279 if (!uniproc_patched || num_possible_cpus() == 1)
16280 free_init_pages("SMP alternatives",
16281 (unsigned long)__smp_locks,
16282- (unsigned long)__smp_locks_end);
16283+ PAGE_ALIGN((unsigned long)__smp_locks_end));
16284 #endif
16285
16286 apply_paravirt(__parainstructions, __parainstructions_end);
16287@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
16288 * instructions. And on the local CPU you need to be protected again NMI or MCE
16289 * handlers seeing an inconsistent instruction while you patch.
16290 */
16291-void *__init_or_module text_poke_early(void *addr, const void *opcode,
16292+void *__kprobes text_poke_early(void *addr, const void *opcode,
16293 size_t len)
16294 {
16295 unsigned long flags;
16296 local_irq_save(flags);
16297- memcpy(addr, opcode, len);
16298+
16299+ pax_open_kernel();
16300+ memcpy(ktla_ktva(addr), opcode, len);
16301 sync_core();
16302+ pax_close_kernel();
16303+
16304 local_irq_restore(flags);
16305 /* Could also do a CLFLUSH here to speed up CPU recovery; but
16306 that causes hangs on some VIA CPUs. */
16307@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
16308 */
16309 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
16310 {
16311- unsigned long flags;
16312- char *vaddr;
16313+ unsigned char *vaddr = ktla_ktva(addr);
16314 struct page *pages[2];
16315- int i;
16316+ size_t i;
16317
16318 if (!core_kernel_text((unsigned long)addr)) {
16319- pages[0] = vmalloc_to_page(addr);
16320- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
16321+ pages[0] = vmalloc_to_page(vaddr);
16322+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
16323 } else {
16324- pages[0] = virt_to_page(addr);
16325+ pages[0] = virt_to_page(vaddr);
16326 WARN_ON(!PageReserved(pages[0]));
16327- pages[1] = virt_to_page(addr + PAGE_SIZE);
16328+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
16329 }
16330 BUG_ON(!pages[0]);
16331- local_irq_save(flags);
16332- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
16333- if (pages[1])
16334- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
16335- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
16336- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
16337- clear_fixmap(FIX_TEXT_POKE0);
16338- if (pages[1])
16339- clear_fixmap(FIX_TEXT_POKE1);
16340- local_flush_tlb();
16341- sync_core();
16342- /* Could also do a CLFLUSH here to speed up CPU recovery; but
16343- that causes hangs on some VIA CPUs. */
16344+ text_poke_early(addr, opcode, len);
16345 for (i = 0; i < len; i++)
16346- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
16347- local_irq_restore(flags);
16348+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
16349 return addr;
16350 }
16351
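text_poke() above drops the fixmap remapping entirely and funnels through text_poke_early(), which brackets the memcpy with pax_open_kernel()/pax_close_kernel() and writes through ktla_ktva(), the KERNEXEC translation from a kernel-text execution address to its writable alias. The open/patch/reseal discipline, shown as a user-space analogy with mprotect (an analogy only; the kernel is not manipulating VMAs here):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pagesz = sysconf(_SC_PAGESIZE);
        char *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (page == MAP_FAILED)
            return 1;
        strcpy(page, "original");
        mprotect(page, pagesz, PROT_READ);          /* normal state: RO */

        mprotect(page, pagesz, PROT_READ | PROT_WRITE); /* "open" */
        memcpy(page, "patched!", 9);                    /* text_poke */
        mprotect(page, pagesz, PROT_READ);              /* "close" */

        puts(page);                                 /* prints "patched!" */
        return 0;
    }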
16352diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
16353index cbf5121..812b537 100644
16354--- a/arch/x86/kernel/apic/apic.c
16355+++ b/arch/x86/kernel/apic/apic.c
16356@@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
16357 /*
16358 * Debug level, exported for io_apic.c
16359 */
16360-unsigned int apic_verbosity;
16361+int apic_verbosity;
16362
16363 int pic_mode;
16364
16365@@ -1956,7 +1956,7 @@ void smp_error_interrupt(struct pt_regs *regs)
16366 apic_write(APIC_ESR, 0);
16367 v1 = apic_read(APIC_ESR);
16368 ack_APIC_irq();
16369- atomic_inc(&irq_err_count);
16370+ atomic_inc_unchecked(&irq_err_count);
16371
16372 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
16373 smp_processor_id(), v0 , v1);
16374diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
16375index 00c77cf..2dc6a2d 100644
16376--- a/arch/x86/kernel/apic/apic_flat_64.c
16377+++ b/arch/x86/kernel/apic/apic_flat_64.c
16378@@ -157,7 +157,7 @@ static int flat_probe(void)
16379 return 1;
16380 }
16381
16382-static struct apic apic_flat = {
16383+static struct apic apic_flat __read_only = {
16384 .name = "flat",
16385 .probe = flat_probe,
16386 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
16387@@ -271,7 +271,7 @@ static int physflat_probe(void)
16388 return 0;
16389 }
16390
16391-static struct apic apic_physflat = {
16392+static struct apic apic_physflat __read_only = {
16393
16394 .name = "physical flat",
16395 .probe = physflat_probe,
16396diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
16397index e145f28..2752888 100644
16398--- a/arch/x86/kernel/apic/apic_noop.c
16399+++ b/arch/x86/kernel/apic/apic_noop.c
16400@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
16401 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
16402 }
16403
16404-struct apic apic_noop = {
16405+struct apic apic_noop __read_only = {
16406 .name = "noop",
16407 .probe = noop_probe,
16408 .acpi_madt_oem_check = NULL,
16409diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
16410index d50e364..543bee3 100644
16411--- a/arch/x86/kernel/apic/bigsmp_32.c
16412+++ b/arch/x86/kernel/apic/bigsmp_32.c
16413@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
16414 return dmi_bigsmp;
16415 }
16416
16417-static struct apic apic_bigsmp = {
16418+static struct apic apic_bigsmp __read_only = {
16419
16420 .name = "bigsmp",
16421 .probe = probe_bigsmp,
16422diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
16423index 0874799..a7a7892 100644
16424--- a/arch/x86/kernel/apic/es7000_32.c
16425+++ b/arch/x86/kernel/apic/es7000_32.c
16426@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
16427 return ret && es7000_apic_is_cluster();
16428 }
16429
16430-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
16431-static struct apic __refdata apic_es7000_cluster = {
16432+static struct apic apic_es7000_cluster __read_only = {
16433
16434 .name = "es7000",
16435 .probe = probe_es7000,
16436@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
16437 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
16438 };
16439
16440-static struct apic __refdata apic_es7000 = {
16441+static struct apic apic_es7000 __read_only = {
16442
16443 .name = "es7000",
16444 .probe = probe_es7000,
16445diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
16446index b739d39..aebc14c 100644
16447--- a/arch/x86/kernel/apic/io_apic.c
16448+++ b/arch/x86/kernel/apic/io_apic.c
16449@@ -1084,7 +1084,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
16450 }
16451 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
16452
16453-void lock_vector_lock(void)
16454+void lock_vector_lock(void) __acquires(vector_lock)
16455 {
16456 /* Used to the online set of cpus does not change
16457 * during assign_irq_vector.
16458@@ -1092,7 +1092,7 @@ void lock_vector_lock(void)
16459 raw_spin_lock(&vector_lock);
16460 }
16461
16462-void unlock_vector_lock(void)
16463+void unlock_vector_lock(void) __releases(vector_lock)
16464 {
16465 raw_spin_unlock(&vector_lock);
16466 }
16467@@ -2399,7 +2399,7 @@ static void ack_apic_edge(struct irq_data *data)
16468 ack_APIC_irq();
16469 }
16470
16471-atomic_t irq_mis_count;
16472+atomic_unchecked_t irq_mis_count;
16473
16474 #ifdef CONFIG_GENERIC_PENDING_IRQ
16475 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
16476@@ -2540,7 +2540,7 @@ static void ack_apic_level(struct irq_data *data)
16477 * at the cpu.
16478 */
16479 if (!(v & (1 << (i & 0x1f)))) {
16480- atomic_inc(&irq_mis_count);
16481+ atomic_inc_unchecked(&irq_mis_count);
16482
16483 eoi_ioapic_irq(irq, cfg);
16484 }
16485@@ -2567,11 +2567,13 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
16486
16487 static void irq_remap_modify_chip_defaults(struct irq_chip *chip)
16488 {
16489- chip->irq_print_chip = ir_print_prefix;
16490- chip->irq_ack = ir_ack_apic_edge;
16491- chip->irq_eoi = ir_ack_apic_level;
16492+ pax_open_kernel();
16493+ *(void **)&chip->irq_print_chip = ir_print_prefix;
16494+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
16495+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
16496
16497- chip->irq_set_affinity = set_remapped_irq_affinity;
16498+ *(void **)&chip->irq_set_affinity = set_remapped_irq_affinity;
16499+ pax_close_kernel();
16500 }
16501 #endif /* CONFIG_IRQ_REMAP */
16502
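With irq_chip constified elsewhere in the patch, irq_remap_modify_chip_defaults() can no longer assign to its members directly; it opens a write window with pax_open_kernel() and strips the qualifier with the *(void **)&chip->member cast seen above. The same cast in miniature; note that in plain C this is only defined when the underlying object is actually writable, which is precisely what the open/close window arranges in the kernel:

    #include <stdio.h>

    struct chip {
        void (*ack)(int irq);
    };

    static void old_ack(int irq) { printf("old ack %d\n", irq); }
    static void new_ack(int irq) { printf("new ack %d\n", irq); }

    int main(void)
    {
        /* Writable here; in the patched kernel the instance sits in
         * read-only memory and is opened with pax_open_kernel() first. */
        static struct chip chip = { .ack = old_ack };
        const struct chip *c = &chip;  /* const view, as callers see it */

        /* c->ack = new_ack;  -- rejected: assignment through const */
        *(void **)&c->ack = (void *)new_ack;  /* the patch's cast */

        chip.ack(7);                   /* now prints "new ack 7" */
        return 0;
    }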
16503diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
16504index d661ee9..791fd33 100644
16505--- a/arch/x86/kernel/apic/numaq_32.c
16506+++ b/arch/x86/kernel/apic/numaq_32.c
16507@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
16508 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
16509 }
16510
16511-/* Use __refdata to keep false positive warning calm. */
16512-static struct apic __refdata apic_numaq = {
16513+static struct apic apic_numaq __read_only = {
16514
16515 .name = "NUMAQ",
16516 .probe = probe_numaq,
16517diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
16518index eb35ef9..f184a21 100644
16519--- a/arch/x86/kernel/apic/probe_32.c
16520+++ b/arch/x86/kernel/apic/probe_32.c
16521@@ -72,7 +72,7 @@ static int probe_default(void)
16522 return 1;
16523 }
16524
16525-static struct apic apic_default = {
16526+static struct apic apic_default __read_only = {
16527
16528 .name = "default",
16529 .probe = probe_default,
16530diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
16531index 77c95c0..434f8a4 100644
16532--- a/arch/x86/kernel/apic/summit_32.c
16533+++ b/arch/x86/kernel/apic/summit_32.c
16534@@ -486,7 +486,7 @@ void setup_summit(void)
16535 }
16536 #endif
16537
16538-static struct apic apic_summit = {
16539+static struct apic apic_summit __read_only = {
16540
16541 .name = "summit",
16542 .probe = probe_summit,
16543diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
16544index c88baa4..757aee1 100644
16545--- a/arch/x86/kernel/apic/x2apic_cluster.c
16546+++ b/arch/x86/kernel/apic/x2apic_cluster.c
16547@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
16548 return notifier_from_errno(err);
16549 }
16550
16551-static struct notifier_block __refdata x2apic_cpu_notifier = {
16552+static struct notifier_block x2apic_cpu_notifier = {
16553 .notifier_call = update_clusterinfo,
16554 };
16555
16556@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
16557 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
16558 }
16559
16560-static struct apic apic_x2apic_cluster = {
16561+static struct apic apic_x2apic_cluster __read_only = {
16562
16563 .name = "cluster x2apic",
16564 .probe = x2apic_cluster_probe,
16565diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
16566index 562a76d..a003c0f 100644
16567--- a/arch/x86/kernel/apic/x2apic_phys.c
16568+++ b/arch/x86/kernel/apic/x2apic_phys.c
16569@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
16570 return apic == &apic_x2apic_phys;
16571 }
16572
16573-static struct apic apic_x2apic_phys = {
16574+static struct apic apic_x2apic_phys __read_only = {
16575
16576 .name = "physical x2apic",
16577 .probe = x2apic_phys_probe,
16578diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
16579index 8cfade9..b9d04fc 100644
16580--- a/arch/x86/kernel/apic/x2apic_uv_x.c
16581+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
16582@@ -333,7 +333,7 @@ static int uv_probe(void)
16583 return apic == &apic_x2apic_uv_x;
16584 }
16585
16586-static struct apic __refdata apic_x2apic_uv_x = {
16587+static struct apic apic_x2apic_uv_x __read_only = {
16588
16589 .name = "UV large system",
16590 .probe = uv_probe,
16591diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
16592index d65464e..1035d31 100644
16593--- a/arch/x86/kernel/apm_32.c
16594+++ b/arch/x86/kernel/apm_32.c
16595@@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
16596 * This is for buggy BIOS's that refer to (real mode) segment 0x40
16597 * even though they are called in protected mode.
16598 */
16599-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
16600+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
16601 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
16602
16603 static const char driver_version[] = "1.16ac"; /* no spaces */
16604@@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
16605 BUG_ON(cpu != 0);
16606 gdt = get_cpu_gdt_table(cpu);
16607 save_desc_40 = gdt[0x40 / 8];
16608+
16609+ pax_open_kernel();
16610 gdt[0x40 / 8] = bad_bios_desc;
16611+ pax_close_kernel();
16612
16613 apm_irq_save(flags);
16614 APM_DO_SAVE_SEGS;
16615@@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
16616 &call->esi);
16617 APM_DO_RESTORE_SEGS;
16618 apm_irq_restore(flags);
16619+
16620+ pax_open_kernel();
16621 gdt[0x40 / 8] = save_desc_40;
16622+ pax_close_kernel();
16623+
16624 put_cpu();
16625
16626 return call->eax & 0xff;
16627@@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void *_call)
16628 BUG_ON(cpu != 0);
16629 gdt = get_cpu_gdt_table(cpu);
16630 save_desc_40 = gdt[0x40 / 8];
16631+
16632+ pax_open_kernel();
16633 gdt[0x40 / 8] = bad_bios_desc;
16634+ pax_close_kernel();
16635
16636 apm_irq_save(flags);
16637 APM_DO_SAVE_SEGS;
16638@@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void *_call)
16639 &call->eax);
16640 APM_DO_RESTORE_SEGS;
16641 apm_irq_restore(flags);
16642+
16643+ pax_open_kernel();
16644 gdt[0x40 / 8] = save_desc_40;
16645+ pax_close_kernel();
16646+
16647 put_cpu();
16648 return error;
16649 }
16650@@ -2345,12 +2359,15 @@ static int __init apm_init(void)
16651 * code to that CPU.
16652 */
16653 gdt = get_cpu_gdt_table(0);
16654+
16655+ pax_open_kernel();
16656 set_desc_base(&gdt[APM_CS >> 3],
16657 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
16658 set_desc_base(&gdt[APM_CS_16 >> 3],
16659 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
16660 set_desc_base(&gdt[APM_DS >> 3],
16661 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
16662+ pax_close_kernel();
16663
16664 proc_create("apm", 0, NULL, &apm_file_ops);
16665
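
Every apm_32.c hunk follows the KERNEXEC pattern: with the GDT mapped read-only, the transient descriptor writes around BIOS calls must be bracketed by pax_open_kernel()/pax_close_kernel(), which on x86 boil down to clearing and restoring the write-protect bit in CR0. The change of bad_bios_desc from 0x4092 to 0x4093 pre-sets the descriptor's accessed bit, since the CPU can no longer set it in a read-only GDT when the segment is loaded. A sketch of the CR0.WP mechanism, assuming the native CR0 accessors; the real PaX helpers also cover PARAVIRT and return the previous CR0 value:

/* kernel context only: CR0 access is privileged */
#define X86_CR0_WP	(1UL << 16)

static inline unsigned long native_read_cr0(void)
{
	unsigned long val;
	asm volatile("mov %%cr0, %0" : "=r" (val));
	return val;
}

static inline void native_write_cr0(unsigned long val)
{
	asm volatile("mov %0, %%cr0" : : "r" (val) : "memory");
}

static inline void pax_open_kernel(void)
{
	/* drop write protection so stores to RO pages (e.g. the GDT) succeed */
	native_write_cr0(native_read_cr0() & ~X86_CR0_WP);
}

static inline void pax_close_kernel(void)
{
	/* re-arm write protection */
	native_write_cr0(native_read_cr0() | X86_CR0_WP);
}
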
16666diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
16667index 2861082..6d4718e 100644
16668--- a/arch/x86/kernel/asm-offsets.c
16669+++ b/arch/x86/kernel/asm-offsets.c
16670@@ -33,6 +33,8 @@ void common(void) {
16671 OFFSET(TI_status, thread_info, status);
16672 OFFSET(TI_addr_limit, thread_info, addr_limit);
16673 OFFSET(TI_preempt_count, thread_info, preempt_count);
16674+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
16675+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
16676
16677 BLANK();
16678 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
16679@@ -53,8 +55,26 @@ void common(void) {
16680 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
16681 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
16682 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
16683+
16684+#ifdef CONFIG_PAX_KERNEXEC
16685+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
16686 #endif
16687
16688+#ifdef CONFIG_PAX_MEMORY_UDEREF
16689+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
16690+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
16691+#ifdef CONFIG_X86_64
16692+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
16693+#endif
16694+#endif
16695+
16696+#endif
16697+
16698+ BLANK();
16699+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
16700+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
16701+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
16702+
16703 #ifdef CONFIG_XEN
16704 BLANK();
16705 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
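
asm-offsets.c is compiled but never linked: the OFFSET()/DEFINE() macros emit marker lines into the generated assembly, which kbuild rewrites into include/generated/asm-offsets.h. That is how TI_lowest_stack, TI_task_thread_sp0 and the PAGE_SIZE_asm/THREAD_SIZE_asm constants added above become visible to the entry_32.S code later in this patch. The mechanism, condensed from include/linux/kbuild.h:

#include <stddef.h>

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))

/* Compiling with -S leaves lines such as
 *	->TI_lowest_stack $12 offsetof(struct thread_info, lowest_stack)
 * in the assembly; a kbuild sed rule turns each into
 *	#define TI_lowest_stack 12
 * in include/generated/asm-offsets.h, usable from .S files.
 */
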
16706diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
16707index 1b4754f..fbb4227 100644
16708--- a/arch/x86/kernel/asm-offsets_64.c
16709+++ b/arch/x86/kernel/asm-offsets_64.c
16710@@ -76,6 +76,7 @@ int main(void)
16711 BLANK();
16712 #undef ENTRY
16713
16714+ DEFINE(TSS_size, sizeof(struct tss_struct));
16715 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
16716 BLANK();
16717
16718diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
16719index a0e067d..9c7db16 100644
16720--- a/arch/x86/kernel/cpu/Makefile
16721+++ b/arch/x86/kernel/cpu/Makefile
16722@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
16723 CFLAGS_REMOVE_perf_event.o = -pg
16724 endif
16725
16726-# Make sure load_percpu_segment has no stackprotector
16727-nostackp := $(call cc-option, -fno-stack-protector)
16728-CFLAGS_common.o := $(nostackp)
16729-
16730 obj-y := intel_cacheinfo.o scattered.o topology.o
16731 obj-y += proc.o capflags.o powerflags.o common.o
16732 obj-y += vmware.o hypervisor.o mshyperv.o
16733diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
16734index 15239ff..e23e04e 100644
16735--- a/arch/x86/kernel/cpu/amd.c
16736+++ b/arch/x86/kernel/cpu/amd.c
16737@@ -733,7 +733,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
16738 unsigned int size)
16739 {
16740 /* AMD errata T13 (order #21922) */
16741- if ((c->x86 == 6)) {
16742+ if (c->x86 == 6) {
16743 /* Duron Rev A0 */
16744 if (c->x86_model == 3 && c->x86_mask == 0)
16745 size = 64;
16746diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
16747index 9c3ab43..51e6366 100644
16748--- a/arch/x86/kernel/cpu/common.c
16749+++ b/arch/x86/kernel/cpu/common.c
16750@@ -86,60 +86,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
16751
16752 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
16753
16754-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
16755-#ifdef CONFIG_X86_64
16756- /*
16757- * We need valid kernel segments for data and code in long mode too
16758- * IRET will check the segment types kkeil 2000/10/28
16759- * Also sysret mandates a special GDT layout
16760- *
16761- * TLS descriptors are currently at a different place compared to i386.
16762- * Hopefully nobody expects them at a fixed place (Wine?)
16763- */
16764- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
16765- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
16766- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
16767- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
16768- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
16769- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
16770-#else
16771- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
16772- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
16773- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
16774- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
16775- /*
16776- * Segments used for calling PnP BIOS have byte granularity.
16777- * The code segments and data segments have fixed 64k limits,
16778- * the transfer segment sizes are set at run time.
16779- */
16780- /* 32-bit code */
16781- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
16782- /* 16-bit code */
16783- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
16784- /* 16-bit data */
16785- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
16786- /* 16-bit data */
16787- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
16788- /* 16-bit data */
16789- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
16790- /*
16791- * The APM segments have byte granularity and their bases
16792- * are set at run time. All have 64k limits.
16793- */
16794- /* 32-bit code */
16795- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
16796- /* 16-bit code */
16797- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
16798- /* data */
16799- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
16800-
16801- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
16802- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
16803- GDT_STACK_CANARY_INIT
16804-#endif
16805-} };
16806-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
16807-
16808 static int __init x86_xsave_setup(char *s)
16809 {
16810 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
16811@@ -389,7 +335,7 @@ void switch_to_new_gdt(int cpu)
16812 {
16813 struct desc_ptr gdt_descr;
16814
16815- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
16816+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16817 gdt_descr.size = GDT_SIZE - 1;
16818 load_gdt(&gdt_descr);
16819 /* Reload the per-cpu base */
16820@@ -885,6 +831,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
16821 /* Filter out anything that depends on CPUID levels we don't have */
16822 filter_cpuid_features(c, true);
16823
16824+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
16825+ setup_clear_cpu_cap(X86_FEATURE_SEP);
16826+#endif
16827+
16828 /* If the model name is still unset, do table lookup. */
16829 if (!c->x86_model_id[0]) {
16830 const char *p;
16831@@ -1068,10 +1018,12 @@ static __init int setup_disablecpuid(char *arg)
16832 }
16833 __setup("clearcpuid=", setup_disablecpuid);
16834
16835+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
16836+EXPORT_PER_CPU_SYMBOL(current_tinfo);
16837+
16838 #ifdef CONFIG_X86_64
16839 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
16840-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
16841- (unsigned long) nmi_idt_table };
16842+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
16843
16844 DEFINE_PER_CPU_FIRST(union irq_stack_union,
16845 irq_stack_union) __aligned(PAGE_SIZE);
16846@@ -1085,7 +1037,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
16847 EXPORT_PER_CPU_SYMBOL(current_task);
16848
16849 DEFINE_PER_CPU(unsigned long, kernel_stack) =
16850- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
16851+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
16852 EXPORT_PER_CPU_SYMBOL(kernel_stack);
16853
16854 DEFINE_PER_CPU(char *, irq_stack_ptr) =
16855@@ -1224,7 +1176,7 @@ void __cpuinit cpu_init(void)
16856 int i;
16857
16858 cpu = stack_smp_processor_id();
16859- t = &per_cpu(init_tss, cpu);
16860+ t = init_tss + cpu;
16861 oist = &per_cpu(orig_ist, cpu);
16862
16863 #ifdef CONFIG_NUMA
16864@@ -1250,7 +1202,7 @@ void __cpuinit cpu_init(void)
16865 switch_to_new_gdt(cpu);
16866 loadsegment(fs, 0);
16867
16868- load_idt((const struct desc_ptr *)&idt_descr);
16869+ load_idt(&idt_descr);
16870
16871 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
16872 syscall_init();
16873@@ -1259,7 +1211,6 @@ void __cpuinit cpu_init(void)
16874 wrmsrl(MSR_KERNEL_GS_BASE, 0);
16875 barrier();
16876
16877- x86_configure_nx();
16878 enable_x2apic();
16879
16880 /*
16881@@ -1311,7 +1262,7 @@ void __cpuinit cpu_init(void)
16882 {
16883 int cpu = smp_processor_id();
16884 struct task_struct *curr = current;
16885- struct tss_struct *t = &per_cpu(init_tss, cpu);
16886+ struct tss_struct *t = init_tss + cpu;
16887 struct thread_struct *thread = &curr->thread;
16888
16889 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
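
The large deletion in common.c removes the per-cpu gdt_page because elsewhere in this patch the GDTs become a fixed, page-aligned cpu_gdt_table array in read-only memory; the espfix code in entry_32.S further down addresses it as cpu_gdt_table + cpu * PAGE_SIZE. The init_tss accesses change to plain array indexing ('t = init_tss + cpu') on the same assumption that init_tss is no longer a per-cpu variable. A rough sketch of the layout implied by that addressing (NR_CPUS and GDT_ENTRIES are placeholders):

struct desc_struct { unsigned long long raw; };
#define GDT_ENTRIES	32
#define NR_CPUS		64

/* one page per CPU, so table n sits at cpu_gdt_table + n * 4096,
 * matching the 'shll $PAGE_SHIFT_asm' addressing in entry_32.S */
struct gdt_page {
	struct desc_struct gdt[GDT_ENTRIES];
} __attribute__((aligned(4096)));

static struct gdt_page cpu_gdt_table[NR_CPUS];

static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
{
	return cpu_gdt_table[cpu].gdt;
}
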
16890diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
16891index fcaabd0..7b55a26 100644
16892--- a/arch/x86/kernel/cpu/intel.c
16893+++ b/arch/x86/kernel/cpu/intel.c
16894@@ -174,7 +174,7 @@ static void __cpuinit trap_init_f00f_bug(void)
16895 * Update the IDT descriptor and reload the IDT so that
16896 * it uses the read-only mapped virtual address.
16897 */
16898- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
16899+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
16900 load_idt(&idt_descr);
16901 }
16902 #endif
16903diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
16904index 84c1309..39b7224 100644
16905--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
16906+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
16907@@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
16908 };
16909
16910 #ifdef CONFIG_AMD_NB
16911+static struct attribute *default_attrs_amd_nb[] = {
16912+ &type.attr,
16913+ &level.attr,
16914+ &coherency_line_size.attr,
16915+ &physical_line_partition.attr,
16916+ &ways_of_associativity.attr,
16917+ &number_of_sets.attr,
16918+ &size.attr,
16919+ &shared_cpu_map.attr,
16920+ &shared_cpu_list.attr,
16921+ NULL,
16922+ NULL,
16923+ NULL,
16924+ NULL
16925+};
16926+
16927 static struct attribute ** __cpuinit amd_l3_attrs(void)
16928 {
16929 static struct attribute **attrs;
16930@@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
16931
16932 n = ARRAY_SIZE(default_attrs);
16933
16934- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
16935- n += 2;
16936-
16937- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
16938- n += 1;
16939-
16940- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
16941- if (attrs == NULL)
16942- return attrs = default_attrs;
16943-
16944- for (n = 0; default_attrs[n]; n++)
16945- attrs[n] = default_attrs[n];
16946+ attrs = default_attrs_amd_nb;
16947
16948 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
16949 attrs[n++] = &cache_disable_0.attr;
16950@@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
16951 .default_attrs = default_attrs,
16952 };
16953
16954+#ifdef CONFIG_AMD_NB
16955+static struct kobj_type ktype_cache_amd_nb = {
16956+ .sysfs_ops = &sysfs_ops,
16957+ .default_attrs = default_attrs_amd_nb,
16958+};
16959+#endif
16960+
16961 static struct kobj_type ktype_percpu_entry = {
16962 .sysfs_ops = &sysfs_ops,
16963 };
16964@@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
16965 return retval;
16966 }
16967
16968+#ifdef CONFIG_AMD_NB
16969+ amd_l3_attrs();
16970+#endif
16971+
16972 for (i = 0; i < num_cache_leaves; i++) {
16973+ struct kobj_type *ktype;
16974+
16975 this_object = INDEX_KOBJECT_PTR(cpu, i);
16976 this_object->cpu = cpu;
16977 this_object->index = i;
16978
16979 this_leaf = CPUID4_INFO_IDX(cpu, i);
16980
16981- ktype_cache.default_attrs = default_attrs;
16982+ ktype = &ktype_cache;
16983 #ifdef CONFIG_AMD_NB
16984 if (this_leaf->base.nb)
16985- ktype_cache.default_attrs = amd_l3_attrs();
16986+ ktype = &ktype_cache_amd_nb;
16987 #endif
16988 retval = kobject_init_and_add(&(this_object->kobj),
16989- &ktype_cache,
16990+ ktype,
16991 per_cpu(ici_cache_kobject, cpu),
16992 "index%1lu", i);
16993 if (unlikely(retval)) {
16994@@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
16995 return NOTIFY_OK;
16996 }
16997
16998-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
16999+static struct notifier_block cacheinfo_cpu_notifier = {
17000 .notifier_call = cacheinfo_cpu_callback,
17001 };
17002
17003diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
17004index 80dbda8..be16652 100644
17005--- a/arch/x86/kernel/cpu/mcheck/mce.c
17006+++ b/arch/x86/kernel/cpu/mcheck/mce.c
17007@@ -45,6 +45,7 @@
17008 #include <asm/processor.h>
17009 #include <asm/mce.h>
17010 #include <asm/msr.h>
17011+#include <asm/local.h>
17012
17013 #include "mce-internal.h"
17014
17015@@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
17016 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
17017 m->cs, m->ip);
17018
17019- if (m->cs == __KERNEL_CS)
17020+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
17021 print_symbol("{%s}", m->ip);
17022 pr_cont("\n");
17023 }
17024@@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
17025
17026 #define PANIC_TIMEOUT 5 /* 5 seconds */
17027
17028-static atomic_t mce_paniced;
17029+static atomic_unchecked_t mce_paniced;
17030
17031 static int fake_panic;
17032-static atomic_t mce_fake_paniced;
17033+static atomic_unchecked_t mce_fake_paniced;
17034
17035 /* Panic in progress. Enable interrupts and wait for final IPI */
17036 static void wait_for_panic(void)
17037@@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17038 /*
17039 * Make sure only one CPU runs in machine check panic
17040 */
17041- if (atomic_inc_return(&mce_paniced) > 1)
17042+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
17043 wait_for_panic();
17044 barrier();
17045
17046@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17047 console_verbose();
17048 } else {
17049 /* Don't log too much for fake panic */
17050- if (atomic_inc_return(&mce_fake_paniced) > 1)
17051+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
17052 return;
17053 }
17054 /* First print corrected ones that are still unlogged */
17055@@ -686,7 +687,7 @@ static int mce_timed_out(u64 *t)
17056 * might have been modified by someone else.
17057 */
17058 rmb();
17059- if (atomic_read(&mce_paniced))
17060+ if (atomic_read_unchecked(&mce_paniced))
17061 wait_for_panic();
17062 if (!mca_cfg.monarch_timeout)
17063 goto out;
17064@@ -1662,7 +1663,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
17065 }
17066
17067 /* Call the installed machine check handler for this CPU setup. */
17068-void (*machine_check_vector)(struct pt_regs *, long error_code) =
17069+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
17070 unexpected_machine_check;
17071
17072 /*
17073@@ -1685,7 +1686,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17074 return;
17075 }
17076
17077+ pax_open_kernel();
17078 machine_check_vector = do_machine_check;
17079+ pax_close_kernel();
17080
17081 __mcheck_cpu_init_generic();
17082 __mcheck_cpu_init_vendor(c);
17083@@ -1699,7 +1702,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17084 */
17085
17086 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
17087-static int mce_chrdev_open_count; /* #times opened */
17088+static local_t mce_chrdev_open_count; /* #times opened */
17089 static int mce_chrdev_open_exclu; /* already open exclusive? */
17090
17091 static int mce_chrdev_open(struct inode *inode, struct file *file)
17092@@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17093 spin_lock(&mce_chrdev_state_lock);
17094
17095 if (mce_chrdev_open_exclu ||
17096- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
17097+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
17098 spin_unlock(&mce_chrdev_state_lock);
17099
17100 return -EBUSY;
17101@@ -1715,7 +1718,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17102
17103 if (file->f_flags & O_EXCL)
17104 mce_chrdev_open_exclu = 1;
17105- mce_chrdev_open_count++;
17106+ local_inc(&mce_chrdev_open_count);
17107
17108 spin_unlock(&mce_chrdev_state_lock);
17109
17110@@ -1726,7 +1729,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
17111 {
17112 spin_lock(&mce_chrdev_state_lock);
17113
17114- mce_chrdev_open_count--;
17115+ local_dec(&mce_chrdev_open_count);
17116 mce_chrdev_open_exclu = 0;
17117
17118 spin_unlock(&mce_chrdev_state_lock);
17119@@ -2372,7 +2375,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
17120 return NOTIFY_OK;
17121 }
17122
17123-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
17124+static struct notifier_block mce_cpu_notifier = {
17125 .notifier_call = mce_cpu_callback,
17126 };
17127
17128@@ -2382,7 +2385,7 @@ static __init void mce_init_banks(void)
17129
17130 for (i = 0; i < mca_cfg.banks; i++) {
17131 struct mce_bank *b = &mce_banks[i];
17132- struct device_attribute *a = &b->attr;
17133+ device_attribute_no_const *a = &b->attr;
17134
17135 sysfs_attr_init(&a->attr);
17136 a->attr.name = b->attrname;
17137@@ -2450,7 +2453,7 @@ struct dentry *mce_get_debugfs_dir(void)
17138 static void mce_reset(void)
17139 {
17140 cpu_missing = 0;
17141- atomic_set(&mce_fake_paniced, 0);
17142+ atomic_set_unchecked(&mce_fake_paniced, 0);
17143 atomic_set(&mce_executing, 0);
17144 atomic_set(&mce_callin, 0);
17145 atomic_set(&global_nwo, 0);
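
Two conversions dominate the mce.c hunks: the panic bookkeeping becomes atomic_unchecked_t for the same reason as irq_mis_count earlier, and mce_chrdev_open_count becomes a local_t. The open count is only ever modified under mce_chrdev_state_lock, so it needs neither overflow trapping nor cross-CPU atomicity; using the local_* accessors keeps the REFCOUNT hardening from treating it as a reference count. A userspace model of the three accessors used above (the kernel's local_t is a cheaper per-CPU atomic; a relaxed atomic stands in here):

typedef struct { long v; } local_t;

static inline void local_inc(local_t *l)
{
	__atomic_fetch_add(&l->v, 1, __ATOMIC_RELAXED);
}

static inline void local_dec(local_t *l)
{
	__atomic_fetch_sub(&l->v, 1, __ATOMIC_RELAXED);
}

static inline long local_read(local_t *l)
{
	return __atomic_load_n(&l->v, __ATOMIC_RELAXED);
}
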
17146diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
17147index 2d5454c..51987eb 100644
17148--- a/arch/x86/kernel/cpu/mcheck/p5.c
17149+++ b/arch/x86/kernel/cpu/mcheck/p5.c
17150@@ -11,6 +11,7 @@
17151 #include <asm/processor.h>
17152 #include <asm/mce.h>
17153 #include <asm/msr.h>
17154+#include <asm/pgtable.h>
17155
17156 /* By default disabled */
17157 int mce_p5_enabled __read_mostly;
17158@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
17159 if (!cpu_has(c, X86_FEATURE_MCE))
17160 return;
17161
17162+ pax_open_kernel();
17163 machine_check_vector = pentium_machine_check;
17164+ pax_close_kernel();
17165 /* Make sure the vector pointer is visible before we enable MCEs: */
17166 wmb();
17167
17168diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17169index 47a1870..8c019a7 100644
17170--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
17171+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17172@@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
17173 return notifier_from_errno(err);
17174 }
17175
17176-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
17177+static struct notifier_block thermal_throttle_cpu_notifier =
17178 {
17179 .notifier_call = thermal_throttle_cpu_callback,
17180 };
17181diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
17182index 2d7998f..17c9de1 100644
17183--- a/arch/x86/kernel/cpu/mcheck/winchip.c
17184+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
17185@@ -10,6 +10,7 @@
17186 #include <asm/processor.h>
17187 #include <asm/mce.h>
17188 #include <asm/msr.h>
17189+#include <asm/pgtable.h>
17190
17191 /* Machine check handler for WinChip C6: */
17192 static void winchip_machine_check(struct pt_regs *regs, long error_code)
17193@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
17194 {
17195 u32 lo, hi;
17196
17197+ pax_open_kernel();
17198 machine_check_vector = winchip_machine_check;
17199+ pax_close_kernel();
17200 /* Make sure the vector pointer is visible before we enable MCEs: */
17201 wmb();
17202
17203diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
17204index 726bf96..81f0526 100644
17205--- a/arch/x86/kernel/cpu/mtrr/main.c
17206+++ b/arch/x86/kernel/cpu/mtrr/main.c
17207@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
17208 u64 size_or_mask, size_and_mask;
17209 static bool mtrr_aps_delayed_init;
17210
17211-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
17212+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
17213
17214 const struct mtrr_ops *mtrr_if;
17215
17216diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
17217index df5e41f..816c719 100644
17218--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
17219+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
17220@@ -25,7 +25,7 @@ struct mtrr_ops {
17221 int (*validate_add_page)(unsigned long base, unsigned long size,
17222 unsigned int type);
17223 int (*have_wrcomb)(void);
17224-};
17225+} __do_const;
17226
17227 extern int generic_get_free_region(unsigned long base, unsigned long size,
17228 int replace_reg);
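
__do_const is consumed by the PaX constify gcc plugin: a structure made up of function pointers, like mtrr_ops, is forced const, so every instance lands in .rodata and the dispatch table cannot be redirected at runtime; the mtrr_ops[] pointer array in main.c gets __read_only to close the same hole one level up. Without the plugin, the effect on a single instance is approximated by hand (illustrative names):

struct mtrr_ops_like {
	int (*have_wrcomb)(void);
	int (*validate_add_page)(unsigned long base, unsigned long size,
				 unsigned int type);
};

static int my_have_wrcomb(void)
{
	return 1;
}

static int my_validate_add_page(unsigned long base, unsigned long size,
				unsigned int type)
{
	(void)base; (void)size; (void)type;
	return 0;
}

/* const instance: the function pointers are fixed at build time and a
 * stray kernel write to them faults instead of hijacking control flow */
static const struct mtrr_ops_like generic_ops = {
	.have_wrcomb		= my_have_wrcomb,
	.validate_add_page	= my_validate_add_page,
};
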
17229diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
17230index 6774c17..72c1b22 100644
17231--- a/arch/x86/kernel/cpu/perf_event.c
17232+++ b/arch/x86/kernel/cpu/perf_event.c
17233@@ -1305,7 +1305,7 @@ static void __init pmu_check_apic(void)
17234 pr_info("no hardware sampling interrupt available.\n");
17235 }
17236
17237-static struct attribute_group x86_pmu_format_group = {
17238+static attribute_group_no_const x86_pmu_format_group = {
17239 .name = "format",
17240 .attrs = NULL,
17241 };
17242@@ -1313,7 +1313,7 @@ static struct attribute_group x86_pmu_format_group = {
17243 struct perf_pmu_events_attr {
17244 struct device_attribute attr;
17245 u64 id;
17246-};
17247+} __do_const;
17248
17249 /*
17250 * Remove all undefined events (x86_pmu.event_map(id) == 0)
17251@@ -1381,7 +1381,7 @@ static struct attribute *events_attr[] = {
17252 NULL,
17253 };
17254
17255-static struct attribute_group x86_pmu_events_group = {
17256+static attribute_group_no_const x86_pmu_events_group = {
17257 .name = "events",
17258 .attrs = events_attr,
17259 };
17260@@ -1880,7 +1880,7 @@ static unsigned long get_segment_base(unsigned int segment)
17261 if (idx > GDT_ENTRIES)
17262 return 0;
17263
17264- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
17265+ desc = get_cpu_gdt_table(smp_processor_id());
17266 }
17267
17268 return get_desc_base(desc + idx);
17269@@ -1970,7 +1970,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
17270 break;
17271
17272 perf_callchain_store(entry, frame.return_address);
17273- fp = frame.next_frame;
17274+ fp = (const void __force_user *)frame.next_frame;
17275 }
17276 }
17277
17278diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
17279index 4914e94..60b06e3 100644
17280--- a/arch/x86/kernel/cpu/perf_event_intel.c
17281+++ b/arch/x86/kernel/cpu/perf_event_intel.c
17282@@ -1958,10 +1958,10 @@ __init int intel_pmu_init(void)
17283 * v2 and above have a perf capabilities MSR
17284 */
17285 if (version > 1) {
17286- u64 capabilities;
17287+ u64 capabilities = x86_pmu.intel_cap.capabilities;
17288
17289- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
17290- x86_pmu.intel_cap.capabilities = capabilities;
17291+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
17292+ x86_pmu.intel_cap.capabilities = capabilities;
17293 }
17294
17295 intel_ds_init();
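
The perf_event_intel.c hunk swaps rdmsrl() for rdmsrl_safe(): the _safe variant carries an exception-table fixup, so reading MSR_IA32_PERF_CAPABILITIES on a CPU or hypervisor that faults on it returns nonzero instead of oopsing, and the value pre-loaded into 'capabilities' serves as the fallback. The resulting idiom, as used above:

	u64 capabilities = x86_pmu.intel_cap.capabilities;	/* fallback */

	/* rdmsrl_safe() returns nonzero if the MSR read faulted */
	if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES,
			&x86_pmu.intel_cap.capabilities))
		x86_pmu.intel_cap.capabilities = capabilities;
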
17296diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17297index b43200d..7fdcdbb 100644
17298--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17299+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17300@@ -2428,7 +2428,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
17301 static int __init uncore_type_init(struct intel_uncore_type *type)
17302 {
17303 struct intel_uncore_pmu *pmus;
17304- struct attribute_group *events_group;
17305+ attribute_group_no_const *events_group;
17306 struct attribute **attrs;
17307 int i, j;
17308
17309@@ -2826,7 +2826,7 @@ static int
17310 return NOTIFY_OK;
17311 }
17312
17313-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
17314+static struct notifier_block uncore_cpu_nb = {
17315 .notifier_call = uncore_cpu_notifier,
17316 /*
17317 * to migrate uncore events, our notifier should be executed
17318diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17319index e68a455..975a932 100644
17320--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17321+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17322@@ -428,7 +428,7 @@ struct intel_uncore_box {
17323 struct uncore_event_desc {
17324 struct kobj_attribute attr;
17325 const char *config;
17326-};
17327+} __do_const;
17328
17329 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
17330 { \
17331diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
17332index 60c7891..9e911d3 100644
17333--- a/arch/x86/kernel/cpuid.c
17334+++ b/arch/x86/kernel/cpuid.c
17335@@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
17336 return notifier_from_errno(err);
17337 }
17338
17339-static struct notifier_block __refdata cpuid_class_cpu_notifier =
17340+static struct notifier_block cpuid_class_cpu_notifier =
17341 {
17342 .notifier_call = cpuid_class_cpu_callback,
17343 };
17344diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
17345index 74467fe..18793d5 100644
17346--- a/arch/x86/kernel/crash.c
17347+++ b/arch/x86/kernel/crash.c
17348@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
17349 {
17350 #ifdef CONFIG_X86_32
17351 struct pt_regs fixed_regs;
17352-#endif
17353
17354-#ifdef CONFIG_X86_32
17355- if (!user_mode_vm(regs)) {
17356+ if (!user_mode(regs)) {
17357 crash_fixup_ss_esp(&fixed_regs, regs);
17358 regs = &fixed_regs;
17359 }
17360diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
17361index 37250fe..bf2ec74 100644
17362--- a/arch/x86/kernel/doublefault_32.c
17363+++ b/arch/x86/kernel/doublefault_32.c
17364@@ -11,7 +11,7 @@
17365
17366 #define DOUBLEFAULT_STACKSIZE (1024)
17367 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
17368-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
17369+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
17370
17371 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
17372
17373@@ -21,7 +21,7 @@ static void doublefault_fn(void)
17374 unsigned long gdt, tss;
17375
17376 store_gdt(&gdt_desc);
17377- gdt = gdt_desc.address;
17378+ gdt = (unsigned long)gdt_desc.address;
17379
17380 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
17381
17382@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
17383 /* 0x2 bit is always set */
17384 .flags = X86_EFLAGS_SF | 0x2,
17385 .sp = STACK_START,
17386- .es = __USER_DS,
17387+ .es = __KERNEL_DS,
17388 .cs = __KERNEL_CS,
17389 .ss = __KERNEL_DS,
17390- .ds = __USER_DS,
17391+ .ds = __KERNEL_DS,
17392 .fs = __KERNEL_PERCPU,
17393
17394 .__cr3 = __pa_nodebug(swapper_pg_dir),
17395diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
17396index ae42418b..787c16b 100644
17397--- a/arch/x86/kernel/dumpstack.c
17398+++ b/arch/x86/kernel/dumpstack.c
17399@@ -2,6 +2,9 @@
17400 * Copyright (C) 1991, 1992 Linus Torvalds
17401 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
17402 */
17403+#ifdef CONFIG_GRKERNSEC_HIDESYM
17404+#define __INCLUDED_BY_HIDESYM 1
17405+#endif
17406 #include <linux/kallsyms.h>
17407 #include <linux/kprobes.h>
17408 #include <linux/uaccess.h>
17409@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
17410 static void
17411 print_ftrace_graph_addr(unsigned long addr, void *data,
17412 const struct stacktrace_ops *ops,
17413- struct thread_info *tinfo, int *graph)
17414+ struct task_struct *task, int *graph)
17415 {
17416- struct task_struct *task;
17417 unsigned long ret_addr;
17418 int index;
17419
17420 if (addr != (unsigned long)return_to_handler)
17421 return;
17422
17423- task = tinfo->task;
17424 index = task->curr_ret_stack;
17425
17426 if (!task->ret_stack || index < *graph)
17427@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
17428 static inline void
17429 print_ftrace_graph_addr(unsigned long addr, void *data,
17430 const struct stacktrace_ops *ops,
17431- struct thread_info *tinfo, int *graph)
17432+ struct task_struct *task, int *graph)
17433 { }
17434 #endif
17435
17436@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
17437 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
17438 */
17439
17440-static inline int valid_stack_ptr(struct thread_info *tinfo,
17441- void *p, unsigned int size, void *end)
17442+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
17443 {
17444- void *t = tinfo;
17445 if (end) {
17446 if (p < end && p >= (end-THREAD_SIZE))
17447 return 1;
17448@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
17449 }
17450
17451 unsigned long
17452-print_context_stack(struct thread_info *tinfo,
17453+print_context_stack(struct task_struct *task, void *stack_start,
17454 unsigned long *stack, unsigned long bp,
17455 const struct stacktrace_ops *ops, void *data,
17456 unsigned long *end, int *graph)
17457 {
17458 struct stack_frame *frame = (struct stack_frame *)bp;
17459
17460- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
17461+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
17462 unsigned long addr;
17463
17464 addr = *stack;
17465@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
17466 } else {
17467 ops->address(data, addr, 0);
17468 }
17469- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
17470+ print_ftrace_graph_addr(addr, data, ops, task, graph);
17471 }
17472 stack++;
17473 }
17474@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
17475 EXPORT_SYMBOL_GPL(print_context_stack);
17476
17477 unsigned long
17478-print_context_stack_bp(struct thread_info *tinfo,
17479+print_context_stack_bp(struct task_struct *task, void *stack_start,
17480 unsigned long *stack, unsigned long bp,
17481 const struct stacktrace_ops *ops, void *data,
17482 unsigned long *end, int *graph)
17483@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
17484 struct stack_frame *frame = (struct stack_frame *)bp;
17485 unsigned long *ret_addr = &frame->return_address;
17486
17487- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
17488+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
17489 unsigned long addr = *ret_addr;
17490
17491 if (!__kernel_text_address(addr))
17492@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
17493 ops->address(data, addr, 1);
17494 frame = frame->next_frame;
17495 ret_addr = &frame->return_address;
17496- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
17497+ print_ftrace_graph_addr(addr, data, ops, task, graph);
17498 }
17499
17500 return (unsigned long)frame;
17501@@ -189,7 +188,7 @@ void dump_stack(void)
17502
17503 bp = stack_frame(current, NULL);
17504 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
17505- current->pid, current->comm, print_tainted(),
17506+ task_pid_nr(current), current->comm, print_tainted(),
17507 init_utsname()->release,
17508 (int)strcspn(init_utsname()->version, " "),
17509 init_utsname()->version);
17510@@ -225,6 +224,8 @@ unsigned __kprobes long oops_begin(void)
17511 }
17512 EXPORT_SYMBOL_GPL(oops_begin);
17513
17514+extern void gr_handle_kernel_exploit(void);
17515+
17516 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
17517 {
17518 if (regs && kexec_should_crash(current))
17519@@ -246,7 +247,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
17520 panic("Fatal exception in interrupt");
17521 if (panic_on_oops)
17522 panic("Fatal exception");
17523- do_exit(signr);
17524+
17525+ gr_handle_kernel_exploit();
17526+
17527+ do_group_exit(signr);
17528 }
17529
17530 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
17531@@ -274,7 +278,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
17532 print_modules();
17533 show_regs(regs);
17534 #ifdef CONFIG_X86_32
17535- if (user_mode_vm(regs)) {
17536+ if (user_mode(regs)) {
17537 sp = regs->sp;
17538 ss = regs->ss & 0xffff;
17539 } else {
17540@@ -302,7 +306,7 @@ void die(const char *str, struct pt_regs *regs, long err)
17541 unsigned long flags = oops_begin();
17542 int sig = SIGSEGV;
17543
17544- if (!user_mode_vm(regs))
17545+ if (!user_mode(regs))
17546 report_bug(regs->ip, regs);
17547
17548 if (__die(str, regs, err))
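
The dumpstack.c changes retarget the stack walkers from thread_info to an explicit stack base, since with this patch thread_info no longer lives at the bottom of the kernel stack (note the current_tinfo per-cpu variable introduced in common.c above). valid_stack_ptr() therefore validates against the passed-in base; roughly:

#define THREAD_SIZE	8192	/* assumption for the sketch */

static inline int valid_stack_ptr(void *t, void *p,
				  unsigned int size, void *end)
{
	if (end) {
		/* IRQ/exception stack with a known end */
		if (p < end && p >= (void *)((char *)end - THREAD_SIZE))
			return 1;
		return 0;
	}
	/* process stack: within [base, base + THREAD_SIZE - size) */
	return p > t && (char *)p < (char *)t + THREAD_SIZE - size;
}

oops_end() additionally gains gr_handle_kernel_exploit(), grsecurity's active response to a kernel oops, and exits with do_group_exit() so that the whole thread group of the offending task is terminated, not just the current thread.
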
17549diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
17550index 1038a41..db2c12b 100644
17551--- a/arch/x86/kernel/dumpstack_32.c
17552+++ b/arch/x86/kernel/dumpstack_32.c
17553@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17554 bp = stack_frame(task, regs);
17555
17556 for (;;) {
17557- struct thread_info *context;
17558+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
17559
17560- context = (struct thread_info *)
17561- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
17562- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
17563+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
17564
17565- stack = (unsigned long *)context->previous_esp;
17566- if (!stack)
17567+ if (stack_start == task_stack_page(task))
17568 break;
17569+ stack = *(unsigned long **)stack_start;
17570 if (ops->stack(data, "IRQ") < 0)
17571 break;
17572 touch_nmi_watchdog();
17573@@ -86,7 +84,7 @@ void show_regs(struct pt_regs *regs)
17574 {
17575 int i;
17576
17577- __show_regs(regs, !user_mode_vm(regs));
17578+ __show_regs(regs, !user_mode(regs));
17579
17580 pr_emerg("Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)\n",
17581 TASK_COMM_LEN, current->comm, task_pid_nr(current),
17582@@ -95,21 +93,22 @@ void show_regs(struct pt_regs *regs)
17583 * When in-kernel, we also print out the stack and code at the
17584 * time of the fault..
17585 */
17586- if (!user_mode_vm(regs)) {
17587+ if (!user_mode(regs)) {
17588 unsigned int code_prologue = code_bytes * 43 / 64;
17589 unsigned int code_len = code_bytes;
17590 unsigned char c;
17591 u8 *ip;
17592+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
17593
17594 pr_emerg("Stack:\n");
17595 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
17596
17597 pr_emerg("Code:");
17598
17599- ip = (u8 *)regs->ip - code_prologue;
17600+ ip = (u8 *)regs->ip - code_prologue + cs_base;
17601 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
17602 /* try starting at IP */
17603- ip = (u8 *)regs->ip;
17604+ ip = (u8 *)regs->ip + cs_base;
17605 code_len = code_len - code_prologue + 1;
17606 }
17607 for (i = 0; i < code_len; i++, ip++) {
17608@@ -118,7 +117,7 @@ void show_regs(struct pt_regs *regs)
17609 pr_cont(" Bad EIP value.");
17610 break;
17611 }
17612- if (ip == (u8 *)regs->ip)
17613+ if (ip == (u8 *)regs->ip + cs_base)
17614 pr_cont(" <%02x>", c);
17615 else
17616 pr_cont(" %02x", c);
17617@@ -131,6 +130,7 @@ int is_valid_bugaddr(unsigned long ip)
17618 {
17619 unsigned short ud2;
17620
17621+ ip = ktla_ktva(ip);
17622 if (ip < PAGE_OFFSET)
17623 return 0;
17624 if (probe_kernel_address((unsigned short *)ip, ud2))
17625@@ -138,3 +138,15 @@ int is_valid_bugaddr(unsigned long ip)
17626
17627 return ud2 == 0x0b0f;
17628 }
17629+
17630+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17631+void pax_check_alloca(unsigned long size)
17632+{
17633+ unsigned long sp = (unsigned long)&sp, stack_left;
17634+
17635+ /* all kernel stacks are of the same size */
17636+ stack_left = sp & (THREAD_SIZE - 1);
17637+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
17638+}
17639+EXPORT_SYMBOL(pax_check_alloca);
17640+#endif
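
The 32-bit pax_check_alloca() added above relies on every i386 kernel stack being THREAD_SIZE-aligned, so sp & (THREAD_SIZE - 1) is exactly the headroom left below the stack pointer. As a worked example, assuming THREAD_SIZE = 8192: for sp = 0xc12f3a40, stack_left = 0x1a40 = 6720 bytes, and the BUG_ON fires for any alloca of 6464 bytes (stack_left - 256) or more, always keeping a 256-byte cushion at the bottom of the stack.
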
17641diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
17642index b653675..51cc8c0 100644
17643--- a/arch/x86/kernel/dumpstack_64.c
17644+++ b/arch/x86/kernel/dumpstack_64.c
17645@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17646 unsigned long *irq_stack_end =
17647 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
17648 unsigned used = 0;
17649- struct thread_info *tinfo;
17650 int graph = 0;
17651 unsigned long dummy;
17652+ void *stack_start;
17653
17654 if (!task)
17655 task = current;
17656@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17657 * current stack address. If the stacks consist of nested
17658 * exceptions
17659 */
17660- tinfo = task_thread_info(task);
17661 for (;;) {
17662 char *id;
17663 unsigned long *estack_end;
17664+
17665 estack_end = in_exception_stack(cpu, (unsigned long)stack,
17666 &used, &id);
17667
17668@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17669 if (ops->stack(data, id) < 0)
17670 break;
17671
17672- bp = ops->walk_stack(tinfo, stack, bp, ops,
17673+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
17674 data, estack_end, &graph);
17675 ops->stack(data, "<EOE>");
17676 /*
17677@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17678 * second-to-last pointer (index -2 to end) in the
17679 * exception stack:
17680 */
17681+ if ((u16)estack_end[-1] != __KERNEL_DS)
17682+ goto out;
17683 stack = (unsigned long *) estack_end[-2];
17684 continue;
17685 }
17686@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17687 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
17688 if (ops->stack(data, "IRQ") < 0)
17689 break;
17690- bp = ops->walk_stack(tinfo, stack, bp,
17691+ bp = ops->walk_stack(task, irq_stack, stack, bp,
17692 ops, data, irq_stack_end, &graph);
17693 /*
17694 * We link to the next stack (which would be
17695@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17696 /*
17697 * This handles the process stack:
17698 */
17699- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
17700+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
17701+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
17702+out:
17703 put_cpu();
17704 }
17705 EXPORT_SYMBOL(dump_trace);
17706@@ -249,7 +253,7 @@ void show_regs(struct pt_regs *regs)
17707 {
17708 int i;
17709 unsigned long sp;
17710- const int cpu = smp_processor_id();
17711+ const int cpu = raw_smp_processor_id();
17712 struct task_struct *cur = current;
17713
17714 sp = regs->sp;
17715@@ -304,3 +308,50 @@ int is_valid_bugaddr(unsigned long ip)
17716
17717 return ud2 == 0x0b0f;
17718 }
17719+
17720+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17721+void pax_check_alloca(unsigned long size)
17722+{
17723+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
17724+ unsigned cpu, used;
17725+ char *id;
17726+
17727+ /* check the process stack first */
17728+ stack_start = (unsigned long)task_stack_page(current);
17729+ stack_end = stack_start + THREAD_SIZE;
17730+ if (likely(stack_start <= sp && sp < stack_end)) {
17731+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
17732+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
17733+ return;
17734+ }
17735+
17736+ cpu = get_cpu();
17737+
17738+ /* check the irq stacks */
17739+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
17740+ stack_start = stack_end - IRQ_STACK_SIZE;
17741+ if (stack_start <= sp && sp < stack_end) {
17742+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
17743+ put_cpu();
17744+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
17745+ return;
17746+ }
17747+
17748+ /* check the exception stacks */
17749+ used = 0;
17750+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
17751+ stack_start = stack_end - EXCEPTION_STKSZ;
17752+ if (stack_end && stack_start <= sp && sp < stack_end) {
17753+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
17754+ put_cpu();
17755+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
17756+ return;
17757+ }
17758+
17759+ put_cpu();
17760+
17761+ /* unknown stack */
17762+ BUG();
17763+}
17764+EXPORT_SYMBOL(pax_check_alloca);
17765+#endif
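
The 64-bit counterpart cannot assume a single stack geometry: sp may sit on the process stack, a per-CPU IRQ stack, or one of the per-CPU exception stacks, each with its own size (THREAD_SIZE, IRQ_STACK_SIZE, EXCEPTION_STKSZ), so pax_check_alloca() classifies sp first and applies the matching mask before the same 256-byte headroom check; an sp on no known stack is itself a BUG(). The related __KERNEL_DS test added to dump_trace() guards the unwind out of an exception stack: the saved stack pointer at estack_end[-2] is only followed when the saved SS at estack_end[-1] is still __KERNEL_DS, i.e. the exception genuinely interrupted kernel context, so a corrupted frame stops the walk instead of steering it.
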
17766diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
17767index 9b9f18b..9fcaa04 100644
17768--- a/arch/x86/kernel/early_printk.c
17769+++ b/arch/x86/kernel/early_printk.c
17770@@ -7,6 +7,7 @@
17771 #include <linux/pci_regs.h>
17772 #include <linux/pci_ids.h>
17773 #include <linux/errno.h>
17774+#include <linux/sched.h>
17775 #include <asm/io.h>
17776 #include <asm/processor.h>
17777 #include <asm/fcntl.h>
17778diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
17779index 6ed91d9..6cc365b 100644
17780--- a/arch/x86/kernel/entry_32.S
17781+++ b/arch/x86/kernel/entry_32.S
17782@@ -177,13 +177,153 @@
17783 /*CFI_REL_OFFSET gs, PT_GS*/
17784 .endm
17785 .macro SET_KERNEL_GS reg
17786+
17787+#ifdef CONFIG_CC_STACKPROTECTOR
17788 movl $(__KERNEL_STACK_CANARY), \reg
17789+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17790+ movl $(__USER_DS), \reg
17791+#else
17792+ xorl \reg, \reg
17793+#endif
17794+
17795 movl \reg, %gs
17796 .endm
17797
17798 #endif /* CONFIG_X86_32_LAZY_GS */
17799
17800-.macro SAVE_ALL
17801+.macro pax_enter_kernel
17802+#ifdef CONFIG_PAX_KERNEXEC
17803+ call pax_enter_kernel
17804+#endif
17805+.endm
17806+
17807+.macro pax_exit_kernel
17808+#ifdef CONFIG_PAX_KERNEXEC
17809+ call pax_exit_kernel
17810+#endif
17811+.endm
17812+
17813+#ifdef CONFIG_PAX_KERNEXEC
17814+ENTRY(pax_enter_kernel)
17815+#ifdef CONFIG_PARAVIRT
17816+ pushl %eax
17817+ pushl %ecx
17818+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
17819+ mov %eax, %esi
17820+#else
17821+ mov %cr0, %esi
17822+#endif
17823+ bts $16, %esi
17824+ jnc 1f
17825+ mov %cs, %esi
17826+ cmp $__KERNEL_CS, %esi
17827+ jz 3f
17828+ ljmp $__KERNEL_CS, $3f
17829+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
17830+2:
17831+#ifdef CONFIG_PARAVIRT
17832+ mov %esi, %eax
17833+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
17834+#else
17835+ mov %esi, %cr0
17836+#endif
17837+3:
17838+#ifdef CONFIG_PARAVIRT
17839+ popl %ecx
17840+ popl %eax
17841+#endif
17842+ ret
17843+ENDPROC(pax_enter_kernel)
17844+
17845+ENTRY(pax_exit_kernel)
17846+#ifdef CONFIG_PARAVIRT
17847+ pushl %eax
17848+ pushl %ecx
17849+#endif
17850+ mov %cs, %esi
17851+ cmp $__KERNEXEC_KERNEL_CS, %esi
17852+ jnz 2f
17853+#ifdef CONFIG_PARAVIRT
17854+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
17855+ mov %eax, %esi
17856+#else
17857+ mov %cr0, %esi
17858+#endif
17859+ btr $16, %esi
17860+ ljmp $__KERNEL_CS, $1f
17861+1:
17862+#ifdef CONFIG_PARAVIRT
17863+ mov %esi, %eax
17864+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
17865+#else
17866+ mov %esi, %cr0
17867+#endif
17868+2:
17869+#ifdef CONFIG_PARAVIRT
17870+ popl %ecx
17871+ popl %eax
17872+#endif
17873+ ret
17874+ENDPROC(pax_exit_kernel)
17875+#endif
17876+
17877+.macro pax_erase_kstack
17878+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17879+ call pax_erase_kstack
17880+#endif
17881+.endm
17882+
17883+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
17884+/*
17885+ * ebp: thread_info
17886+ */
17887+ENTRY(pax_erase_kstack)
17888+ pushl %edi
17889+ pushl %ecx
17890+ pushl %eax
17891+
17892+ mov TI_lowest_stack(%ebp), %edi
17893+ mov $-0xBEEF, %eax
17894+ std
17895+
17896+1: mov %edi, %ecx
17897+ and $THREAD_SIZE_asm - 1, %ecx
17898+ shr $2, %ecx
17899+ repne scasl
17900+ jecxz 2f
17901+
17902+ cmp $2*16, %ecx
17903+ jc 2f
17904+
17905+ mov $2*16, %ecx
17906+ repe scasl
17907+ jecxz 2f
17908+ jne 1b
17909+
17910+2: cld
17911+ mov %esp, %ecx
17912+ sub %edi, %ecx
17913+
17914+ cmp $THREAD_SIZE_asm, %ecx
17915+ jb 3f
17916+ ud2
17917+3:
17918+
17919+ shr $2, %ecx
17920+ rep stosl
17921+
17922+ mov TI_task_thread_sp0(%ebp), %edi
17923+ sub $128, %edi
17924+ mov %edi, TI_lowest_stack(%ebp)
17925+
17926+ popl %eax
17927+ popl %ecx
17928+ popl %edi
17929+ ret
17930+ENDPROC(pax_erase_kstack)
17931+#endif
17932+
17933+.macro __SAVE_ALL _DS
17934 cld
17935 PUSH_GS
17936 pushl_cfi %fs
17937@@ -206,7 +346,7 @@
17938 CFI_REL_OFFSET ecx, 0
17939 pushl_cfi %ebx
17940 CFI_REL_OFFSET ebx, 0
17941- movl $(__USER_DS), %edx
17942+ movl $\_DS, %edx
17943 movl %edx, %ds
17944 movl %edx, %es
17945 movl $(__KERNEL_PERCPU), %edx
17946@@ -214,6 +354,15 @@
17947 SET_KERNEL_GS %edx
17948 .endm
17949
17950+.macro SAVE_ALL
17951+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
17952+ __SAVE_ALL __KERNEL_DS
17953+ pax_enter_kernel
17954+#else
17955+ __SAVE_ALL __USER_DS
17956+#endif
17957+.endm
17958+
17959 .macro RESTORE_INT_REGS
17960 popl_cfi %ebx
17961 CFI_RESTORE ebx
17962@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
17963 popfl_cfi
17964 jmp syscall_exit
17965 CFI_ENDPROC
17966-END(ret_from_fork)
17967+ENDPROC(ret_from_fork)
17968
17969 ENTRY(ret_from_kernel_thread)
17970 CFI_STARTPROC
17971@@ -344,7 +493,15 @@ ret_from_intr:
17972 andl $SEGMENT_RPL_MASK, %eax
17973 #endif
17974 cmpl $USER_RPL, %eax
17975+
17976+#ifdef CONFIG_PAX_KERNEXEC
17977+ jae resume_userspace
17978+
17979+ pax_exit_kernel
17980+ jmp resume_kernel
17981+#else
17982 jb resume_kernel # not returning to v8086 or userspace
17983+#endif
17984
17985 ENTRY(resume_userspace)
17986 LOCKDEP_SYS_EXIT
17987@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
17988 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
17989 # int/exception return?
17990 jne work_pending
17991- jmp restore_all
17992-END(ret_from_exception)
17993+ jmp restore_all_pax
17994+ENDPROC(ret_from_exception)
17995
17996 #ifdef CONFIG_PREEMPT
17997 ENTRY(resume_kernel)
17998@@ -372,7 +529,7 @@ need_resched:
17999 jz restore_all
18000 call preempt_schedule_irq
18001 jmp need_resched
18002-END(resume_kernel)
18003+ENDPROC(resume_kernel)
18004 #endif
18005 CFI_ENDPROC
18006 /*
18007@@ -406,30 +563,45 @@ sysenter_past_esp:
18008 /*CFI_REL_OFFSET cs, 0*/
18009 /*
18010 * Push current_thread_info()->sysenter_return to the stack.
18011- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
18012- * pushed above; +8 corresponds to copy_thread's esp0 setting.
18013 */
18014- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
18015+ pushl_cfi $0
18016 CFI_REL_OFFSET eip, 0
18017
18018 pushl_cfi %eax
18019 SAVE_ALL
18020+ GET_THREAD_INFO(%ebp)
18021+ movl TI_sysenter_return(%ebp),%ebp
18022+ movl %ebp,PT_EIP(%esp)
18023 ENABLE_INTERRUPTS(CLBR_NONE)
18024
18025 /*
18026 * Load the potential sixth argument from user stack.
18027 * Careful about security.
18028 */
18029+ movl PT_OLDESP(%esp),%ebp
18030+
18031+#ifdef CONFIG_PAX_MEMORY_UDEREF
18032+ mov PT_OLDSS(%esp),%ds
18033+1: movl %ds:(%ebp),%ebp
18034+ push %ss
18035+ pop %ds
18036+#else
18037 cmpl $__PAGE_OFFSET-3,%ebp
18038 jae syscall_fault
18039 ASM_STAC
18040 1: movl (%ebp),%ebp
18041 ASM_CLAC
18042+#endif
18043+
18044 movl %ebp,PT_EBP(%esp)
18045 _ASM_EXTABLE(1b,syscall_fault)
18046
18047 GET_THREAD_INFO(%ebp)
18048
18049+#ifdef CONFIG_PAX_RANDKSTACK
18050+ pax_erase_kstack
18051+#endif
18052+
18053 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18054 jnz sysenter_audit
18055 sysenter_do_call:
18056@@ -444,12 +616,24 @@ sysenter_do_call:
18057 testl $_TIF_ALLWORK_MASK, %ecx
18058 jne sysexit_audit
18059 sysenter_exit:
18060+
18061+#ifdef CONFIG_PAX_RANDKSTACK
18062+ pushl_cfi %eax
18063+ movl %esp, %eax
18064+ call pax_randomize_kstack
18065+ popl_cfi %eax
18066+#endif
18067+
18068+ pax_erase_kstack
18069+
18070 /* if something modifies registers it must also disable sysexit */
18071 movl PT_EIP(%esp), %edx
18072 movl PT_OLDESP(%esp), %ecx
18073 xorl %ebp,%ebp
18074 TRACE_IRQS_ON
18075 1: mov PT_FS(%esp), %fs
18076+2: mov PT_DS(%esp), %ds
18077+3: mov PT_ES(%esp), %es
18078 PTGS_TO_GS
18079 ENABLE_INTERRUPTS_SYSEXIT
18080
18081@@ -466,6 +650,9 @@ sysenter_audit:
18082 movl %eax,%edx /* 2nd arg: syscall number */
18083 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
18084 call __audit_syscall_entry
18085+
18086+ pax_erase_kstack
18087+
18088 pushl_cfi %ebx
18089 movl PT_EAX(%esp),%eax /* reload syscall number */
18090 jmp sysenter_do_call
18091@@ -491,10 +678,16 @@ sysexit_audit:
18092
18093 CFI_ENDPROC
18094 .pushsection .fixup,"ax"
18095-2: movl $0,PT_FS(%esp)
18096+4: movl $0,PT_FS(%esp)
18097+ jmp 1b
18098+5: movl $0,PT_DS(%esp)
18099+ jmp 1b
18100+6: movl $0,PT_ES(%esp)
18101 jmp 1b
18102 .popsection
18103- _ASM_EXTABLE(1b,2b)
18104+ _ASM_EXTABLE(1b,4b)
18105+ _ASM_EXTABLE(2b,5b)
18106+ _ASM_EXTABLE(3b,6b)
18107 PTGS_TO_GS_EX
18108 ENDPROC(ia32_sysenter_target)
18109
18110@@ -509,6 +702,11 @@ ENTRY(system_call)
18111 pushl_cfi %eax # save orig_eax
18112 SAVE_ALL
18113 GET_THREAD_INFO(%ebp)
18114+
18115+#ifdef CONFIG_PAX_RANDKSTACK
18116+ pax_erase_kstack
18117+#endif
18118+
18119 # system call tracing in operation / emulation
18120 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18121 jnz syscall_trace_entry
18122@@ -527,6 +725,15 @@ syscall_exit:
18123 testl $_TIF_ALLWORK_MASK, %ecx # current->work
18124 jne syscall_exit_work
18125
18126+restore_all_pax:
18127+
18128+#ifdef CONFIG_PAX_RANDKSTACK
18129+ movl %esp, %eax
18130+ call pax_randomize_kstack
18131+#endif
18132+
18133+ pax_erase_kstack
18134+
18135 restore_all:
18136 TRACE_IRQS_IRET
18137 restore_all_notrace:
18138@@ -583,14 +790,34 @@ ldt_ss:
18139 * compensating for the offset by changing to the ESPFIX segment with
18140 * a base address that matches for the difference.
18141 */
18142-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
18143+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
18144 mov %esp, %edx /* load kernel esp */
18145 mov PT_OLDESP(%esp), %eax /* load userspace esp */
18146 mov %dx, %ax /* eax: new kernel esp */
18147 sub %eax, %edx /* offset (low word is 0) */
18148+#ifdef CONFIG_SMP
18149+ movl PER_CPU_VAR(cpu_number), %ebx
18150+ shll $PAGE_SHIFT_asm, %ebx
18151+ addl $cpu_gdt_table, %ebx
18152+#else
18153+ movl $cpu_gdt_table, %ebx
18154+#endif
18155 shr $16, %edx
18156- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
18157- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
18158+
18159+#ifdef CONFIG_PAX_KERNEXEC
18160+ mov %cr0, %esi
18161+ btr $16, %esi
18162+ mov %esi, %cr0
18163+#endif
18164+
18165+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
18166+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
18167+
18168+#ifdef CONFIG_PAX_KERNEXEC
18169+ bts $16, %esi
18170+ mov %esi, %cr0
18171+#endif
18172+
18173 pushl_cfi $__ESPFIX_SS
18174 pushl_cfi %eax /* new kernel esp */
18175 /* Disable interrupts, but do not irqtrace this section: we
18176@@ -619,20 +846,18 @@ work_resched:
18177 movl TI_flags(%ebp), %ecx
18178 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
18179 # than syscall tracing?
18180- jz restore_all
18181+ jz restore_all_pax
18182 testb $_TIF_NEED_RESCHED, %cl
18183 jnz work_resched
18184
18185 work_notifysig: # deal with pending signals and
18186 # notify-resume requests
18187+ movl %esp, %eax
18188 #ifdef CONFIG_VM86
18189 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
18190- movl %esp, %eax
18191 jne work_notifysig_v86 # returning to kernel-space or
18192 # vm86-space
18193 1:
18194-#else
18195- movl %esp, %eax
18196 #endif
18197 TRACE_IRQS_ON
18198 ENABLE_INTERRUPTS(CLBR_NONE)
18199@@ -653,7 +878,7 @@ work_notifysig_v86:
18200 movl %eax, %esp
18201 jmp 1b
18202 #endif
18203-END(work_pending)
18204+ENDPROC(work_pending)
18205
18206 # perform syscall exit tracing
18207 ALIGN
18208@@ -661,11 +886,14 @@ syscall_trace_entry:
18209 movl $-ENOSYS,PT_EAX(%esp)
18210 movl %esp, %eax
18211 call syscall_trace_enter
18212+
18213+ pax_erase_kstack
18214+
18215 /* What it returned is what we'll actually use. */
18216 cmpl $(NR_syscalls), %eax
18217 jnae syscall_call
18218 jmp syscall_exit
18219-END(syscall_trace_entry)
18220+ENDPROC(syscall_trace_entry)
18221
18222 # perform syscall exit tracing
18223 ALIGN
18224@@ -678,21 +906,25 @@ syscall_exit_work:
18225 movl %esp, %eax
18226 call syscall_trace_leave
18227 jmp resume_userspace
18228-END(syscall_exit_work)
18229+ENDPROC(syscall_exit_work)
18230 CFI_ENDPROC
18231
18232 RING0_INT_FRAME # can't unwind into user space anyway
18233 syscall_fault:
18234+#ifdef CONFIG_PAX_MEMORY_UDEREF
18235+ push %ss
18236+ pop %ds
18237+#endif
18238 ASM_CLAC
18239 GET_THREAD_INFO(%ebp)
18240 movl $-EFAULT,PT_EAX(%esp)
18241 jmp resume_userspace
18242-END(syscall_fault)
18243+ENDPROC(syscall_fault)
18244
18245 syscall_badsys:
18246 movl $-ENOSYS,PT_EAX(%esp)
18247 jmp resume_userspace
18248-END(syscall_badsys)
18249+ENDPROC(syscall_badsys)
18250 CFI_ENDPROC
18251 /*
18252 * End of kprobes section
18253@@ -753,8 +985,15 @@ PTREGSCALL1(vm86old)
18254 * normal stack and adjusts ESP with the matching offset.
18255 */
18256 /* fixup the stack */
18257- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
18258- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
18259+#ifdef CONFIG_SMP
18260+ movl PER_CPU_VAR(cpu_number), %ebx
18261+ shll $PAGE_SHIFT_asm, %ebx
18262+ addl $cpu_gdt_table, %ebx
18263+#else
18264+ movl $cpu_gdt_table, %ebx
18265+#endif
18266+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
18267+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
18268 shl $16, %eax
18269 addl %esp, %eax /* the adjusted stack pointer */
18270 pushl_cfi $__KERNEL_DS
18271@@ -807,7 +1046,7 @@ vector=vector+1
18272 .endr
18273 2: jmp common_interrupt
18274 .endr
18275-END(irq_entries_start)
18276+ENDPROC(irq_entries_start)
18277
18278 .previous
18279 END(interrupt)
18280@@ -858,7 +1097,7 @@ ENTRY(coprocessor_error)
18281 pushl_cfi $do_coprocessor_error
18282 jmp error_code
18283 CFI_ENDPROC
18284-END(coprocessor_error)
18285+ENDPROC(coprocessor_error)
18286
18287 ENTRY(simd_coprocessor_error)
18288 RING0_INT_FRAME
18289@@ -880,7 +1119,7 @@ ENTRY(simd_coprocessor_error)
18290 #endif
18291 jmp error_code
18292 CFI_ENDPROC
18293-END(simd_coprocessor_error)
18294+ENDPROC(simd_coprocessor_error)
18295
18296 ENTRY(device_not_available)
18297 RING0_INT_FRAME
18298@@ -889,18 +1128,18 @@ ENTRY(device_not_available)
18299 pushl_cfi $do_device_not_available
18300 jmp error_code
18301 CFI_ENDPROC
18302-END(device_not_available)
18303+ENDPROC(device_not_available)
18304
18305 #ifdef CONFIG_PARAVIRT
18306 ENTRY(native_iret)
18307 iret
18308 _ASM_EXTABLE(native_iret, iret_exc)
18309-END(native_iret)
18310+ENDPROC(native_iret)
18311
18312 ENTRY(native_irq_enable_sysexit)
18313 sti
18314 sysexit
18315-END(native_irq_enable_sysexit)
18316+ENDPROC(native_irq_enable_sysexit)
18317 #endif
18318
18319 ENTRY(overflow)
18320@@ -910,7 +1149,7 @@ ENTRY(overflow)
18321 pushl_cfi $do_overflow
18322 jmp error_code
18323 CFI_ENDPROC
18324-END(overflow)
18325+ENDPROC(overflow)
18326
18327 ENTRY(bounds)
18328 RING0_INT_FRAME
18329@@ -919,7 +1158,7 @@ ENTRY(bounds)
18330 pushl_cfi $do_bounds
18331 jmp error_code
18332 CFI_ENDPROC
18333-END(bounds)
18334+ENDPROC(bounds)
18335
18336 ENTRY(invalid_op)
18337 RING0_INT_FRAME
18338@@ -928,7 +1167,7 @@ ENTRY(invalid_op)
18339 pushl_cfi $do_invalid_op
18340 jmp error_code
18341 CFI_ENDPROC
18342-END(invalid_op)
18343+ENDPROC(invalid_op)
18344
18345 ENTRY(coprocessor_segment_overrun)
18346 RING0_INT_FRAME
18347@@ -937,7 +1176,7 @@ ENTRY(coprocessor_segment_overrun)
18348 pushl_cfi $do_coprocessor_segment_overrun
18349 jmp error_code
18350 CFI_ENDPROC
18351-END(coprocessor_segment_overrun)
18352+ENDPROC(coprocessor_segment_overrun)
18353
18354 ENTRY(invalid_TSS)
18355 RING0_EC_FRAME
18356@@ -945,7 +1184,7 @@ ENTRY(invalid_TSS)
18357 pushl_cfi $do_invalid_TSS
18358 jmp error_code
18359 CFI_ENDPROC
18360-END(invalid_TSS)
18361+ENDPROC(invalid_TSS)
18362
18363 ENTRY(segment_not_present)
18364 RING0_EC_FRAME
18365@@ -953,7 +1192,7 @@ ENTRY(segment_not_present)
18366 pushl_cfi $do_segment_not_present
18367 jmp error_code
18368 CFI_ENDPROC
18369-END(segment_not_present)
18370+ENDPROC(segment_not_present)
18371
18372 ENTRY(stack_segment)
18373 RING0_EC_FRAME
18374@@ -961,7 +1200,7 @@ ENTRY(stack_segment)
18375 pushl_cfi $do_stack_segment
18376 jmp error_code
18377 CFI_ENDPROC
18378-END(stack_segment)
18379+ENDPROC(stack_segment)
18380
18381 ENTRY(alignment_check)
18382 RING0_EC_FRAME
18383@@ -969,7 +1208,7 @@ ENTRY(alignment_check)
18384 pushl_cfi $do_alignment_check
18385 jmp error_code
18386 CFI_ENDPROC
18387-END(alignment_check)
18388+ENDPROC(alignment_check)
18389
18390 ENTRY(divide_error)
18391 RING0_INT_FRAME
18392@@ -978,7 +1217,7 @@ ENTRY(divide_error)
18393 pushl_cfi $do_divide_error
18394 jmp error_code
18395 CFI_ENDPROC
18396-END(divide_error)
18397+ENDPROC(divide_error)
18398
18399 #ifdef CONFIG_X86_MCE
18400 ENTRY(machine_check)
18401@@ -988,7 +1227,7 @@ ENTRY(machine_check)
18402 pushl_cfi machine_check_vector
18403 jmp error_code
18404 CFI_ENDPROC
18405-END(machine_check)
18406+ENDPROC(machine_check)
18407 #endif
18408
18409 ENTRY(spurious_interrupt_bug)
18410@@ -998,7 +1237,7 @@ ENTRY(spurious_interrupt_bug)
18411 pushl_cfi $do_spurious_interrupt_bug
18412 jmp error_code
18413 CFI_ENDPROC
18414-END(spurious_interrupt_bug)
18415+ENDPROC(spurious_interrupt_bug)
18416 /*
18417 * End of kprobes section
18418 */
18419@@ -1101,7 +1340,7 @@ BUILD_INTERRUPT3(xen_hvm_callback_vector, XEN_HVM_EVTCHN_CALLBACK,
18420
18421 ENTRY(mcount)
18422 ret
18423-END(mcount)
18424+ENDPROC(mcount)
18425
18426 ENTRY(ftrace_caller)
18427 cmpl $0, function_trace_stop
18428@@ -1134,7 +1373,7 @@ ftrace_graph_call:
18429 .globl ftrace_stub
18430 ftrace_stub:
18431 ret
18432-END(ftrace_caller)
18433+ENDPROC(ftrace_caller)
18434
18435 ENTRY(ftrace_regs_caller)
18436 pushf /* push flags before compare (in cs location) */
18437@@ -1235,7 +1474,7 @@ trace:
18438 popl %ecx
18439 popl %eax
18440 jmp ftrace_stub
18441-END(mcount)
18442+ENDPROC(mcount)
18443 #endif /* CONFIG_DYNAMIC_FTRACE */
18444 #endif /* CONFIG_FUNCTION_TRACER */
18445
18446@@ -1253,7 +1492,7 @@ ENTRY(ftrace_graph_caller)
18447 popl %ecx
18448 popl %eax
18449 ret
18450-END(ftrace_graph_caller)
18451+ENDPROC(ftrace_graph_caller)
18452
18453 .globl return_to_handler
18454 return_to_handler:
18455@@ -1309,15 +1548,18 @@ error_code:
18456 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
18457 REG_TO_PTGS %ecx
18458 SET_KERNEL_GS %ecx
18459- movl $(__USER_DS), %ecx
18460+ movl $(__KERNEL_DS), %ecx
18461 movl %ecx, %ds
18462 movl %ecx, %es
18463+
18464+ pax_enter_kernel
18465+
18466 TRACE_IRQS_OFF
18467 movl %esp,%eax # pt_regs pointer
18468 call *%edi
18469 jmp ret_from_exception
18470 CFI_ENDPROC
18471-END(page_fault)
18472+ENDPROC(page_fault)
18473
18474 /*
18475 * Debug traps and NMI can happen at the one SYSENTER instruction
18476@@ -1360,7 +1602,7 @@ debug_stack_correct:
18477 call do_debug
18478 jmp ret_from_exception
18479 CFI_ENDPROC
18480-END(debug)
18481+ENDPROC(debug)
18482
18483 /*
18484 * NMI is doubly nasty. It can happen _while_ we're handling
18485@@ -1398,6 +1640,9 @@ nmi_stack_correct:
18486 xorl %edx,%edx # zero error code
18487 movl %esp,%eax # pt_regs pointer
18488 call do_nmi
18489+
18490+ pax_exit_kernel
18491+
18492 jmp restore_all_notrace
18493 CFI_ENDPROC
18494
18495@@ -1434,12 +1679,15 @@ nmi_espfix_stack:
18496 FIXUP_ESPFIX_STACK # %eax == %esp
18497 xorl %edx,%edx # zero error code
18498 call do_nmi
18499+
18500+ pax_exit_kernel
18501+
18502 RESTORE_REGS
18503 lss 12+4(%esp), %esp # back to espfix stack
18504 CFI_ADJUST_CFA_OFFSET -24
18505 jmp irq_return
18506 CFI_ENDPROC
18507-END(nmi)
18508+ENDPROC(nmi)
18509
18510 ENTRY(int3)
18511 RING0_INT_FRAME
18512@@ -1452,14 +1700,14 @@ ENTRY(int3)
18513 call do_int3
18514 jmp ret_from_exception
18515 CFI_ENDPROC
18516-END(int3)
18517+ENDPROC(int3)
18518
18519 ENTRY(general_protection)
18520 RING0_EC_FRAME
18521 pushl_cfi $do_general_protection
18522 jmp error_code
18523 CFI_ENDPROC
18524-END(general_protection)
18525+ENDPROC(general_protection)
18526
18527 #ifdef CONFIG_KVM_GUEST
18528 ENTRY(async_page_fault)
18529@@ -1468,7 +1716,7 @@ ENTRY(async_page_fault)
18530 pushl_cfi $do_async_page_fault
18531 jmp error_code
18532 CFI_ENDPROC
18533-END(async_page_fault)
18534+ENDPROC(async_page_fault)
18535 #endif
18536
18537 /*
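
The entry_32.S hunks above are a bundle of PaX entry-path changes: END() becomes ENDPROC() so the entry points carry proper function-type symbol information, the syscall-exit path is routed through restore_all_pax (which randomizes the kernel stack pointer under CONFIG_PAX_RANDKSTACK and scrubs it with pax_erase_kstack), error_code loads __KERNEL_DS instead of __USER_DS, and the ESPFIX fixup is rewritten to patch this CPU's page of the static cpu_gdt_table rather than gdt_page, toggling CR0.WP around the write because KERNEXEC keeps the GDT read-only. The descriptor base written there preserves one invariant: ESPFIX_SS:new_esp must still resolve to the real iret frame. A minimal C sketch of that arithmetic, with variable names of my own choosing rather than the patch's:

#include <assert.h>
#include <stdint.h>

/* Mirrors the mov/sub/shr sequence in the ldt_ss hunk: build an esp whose
 * high word is the user's (so no kernel stack bits leak through a 16-bit
 * iret) and whose low word still indexes the frame, then derive the
 * segment base that makes base + new_esp land on the real kernel stack. */
static uint32_t espfix_base(uint32_t kernel_esp, uint32_t user_esp)
{
        uint32_t new_esp = (user_esp & 0xffff0000u) | (kernel_esp & 0x0000ffffu);
        uint32_t base    = kernel_esp - new_esp; /* low word is 0 by construction */

        assert((base & 0xffffu) == 0);
        assert(base + new_esp == kernel_esp);    /* ESPFIX_SS:new_esp == old frame */
        return base; /* base[23:16] goes to descriptor byte 4, base[31:24] to byte 7 */
}
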
18538diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
18539index cb3c591..bc63707 100644
18540--- a/arch/x86/kernel/entry_64.S
18541+++ b/arch/x86/kernel/entry_64.S
18542@@ -59,6 +59,8 @@
18543 #include <asm/context_tracking.h>
18544 #include <asm/smap.h>
18545 #include <linux/err.h>
18546+#include <asm/pgtable.h>
18547+#include <asm/alternative-asm.h>
18548
18549 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
18550 #include <linux/elf-em.h>
18551@@ -80,8 +82,9 @@
18552 #ifdef CONFIG_DYNAMIC_FTRACE
18553
18554 ENTRY(function_hook)
18555+ pax_force_retaddr
18556 retq
18557-END(function_hook)
18558+ENDPROC(function_hook)
18559
18560 /* skip is set if stack has been adjusted */
18561 .macro ftrace_caller_setup skip=0
18562@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
18563 #endif
18564
18565 GLOBAL(ftrace_stub)
18566+ pax_force_retaddr
18567 retq
18568-END(ftrace_caller)
18569+ENDPROC(ftrace_caller)
18570
18571 ENTRY(ftrace_regs_caller)
18572 /* Save the current flags before compare (in SS location)*/
18573@@ -191,7 +195,7 @@ ftrace_restore_flags:
18574 popfq
18575 jmp ftrace_stub
18576
18577-END(ftrace_regs_caller)
18578+ENDPROC(ftrace_regs_caller)
18579
18580
18581 #else /* ! CONFIG_DYNAMIC_FTRACE */
18582@@ -212,6 +216,7 @@ ENTRY(function_hook)
18583 #endif
18584
18585 GLOBAL(ftrace_stub)
18586+ pax_force_retaddr
18587 retq
18588
18589 trace:
18590@@ -225,12 +230,13 @@ trace:
18591 #endif
18592 subq $MCOUNT_INSN_SIZE, %rdi
18593
18594+ pax_force_fptr ftrace_trace_function
18595 call *ftrace_trace_function
18596
18597 MCOUNT_RESTORE_FRAME
18598
18599 jmp ftrace_stub
18600-END(function_hook)
18601+ENDPROC(function_hook)
18602 #endif /* CONFIG_DYNAMIC_FTRACE */
18603 #endif /* CONFIG_FUNCTION_TRACER */
18604
18605@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
18606
18607 MCOUNT_RESTORE_FRAME
18608
18609+ pax_force_retaddr
18610 retq
18611-END(ftrace_graph_caller)
18612+ENDPROC(ftrace_graph_caller)
18613
18614 GLOBAL(return_to_handler)
18615 subq $24, %rsp
18616@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
18617 movq 8(%rsp), %rdx
18618 movq (%rsp), %rax
18619 addq $24, %rsp
18620+ pax_force_fptr %rdi
18621 jmp *%rdi
18622+ENDPROC(return_to_handler)
18623 #endif
18624
18625
18626@@ -284,6 +293,273 @@ ENTRY(native_usergs_sysret64)
18627 ENDPROC(native_usergs_sysret64)
18628 #endif /* CONFIG_PARAVIRT */
18629
18630+ .macro ljmpq sel, off
18631+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
18632+ .byte 0x48; ljmp *1234f(%rip)
18633+ .pushsection .rodata
18634+ .align 16
18635+ 1234: .quad \off; .word \sel
18636+ .popsection
18637+#else
18638+ pushq $\sel
18639+ pushq $\off
18640+ lretq
18641+#endif
18642+ .endm
18643+
18644+ .macro pax_enter_kernel
18645+ pax_set_fptr_mask
18646+#ifdef CONFIG_PAX_KERNEXEC
18647+ call pax_enter_kernel
18648+#endif
18649+ .endm
18650+
18651+ .macro pax_exit_kernel
18652+#ifdef CONFIG_PAX_KERNEXEC
18653+ call pax_exit_kernel
18654+#endif
18655+ .endm
18656+
18657+#ifdef CONFIG_PAX_KERNEXEC
18658+ENTRY(pax_enter_kernel)
18659+ pushq %rdi
18660+
18661+#ifdef CONFIG_PARAVIRT
18662+ PV_SAVE_REGS(CLBR_RDI)
18663+#endif
18664+
18665+ GET_CR0_INTO_RDI
18666+ bts $16,%rdi
18667+ jnc 3f
18668+ mov %cs,%edi
18669+ cmp $__KERNEL_CS,%edi
18670+ jnz 2f
18671+1:
18672+
18673+#ifdef CONFIG_PARAVIRT
18674+ PV_RESTORE_REGS(CLBR_RDI)
18675+#endif
18676+
18677+ popq %rdi
18678+ pax_force_retaddr
18679+ retq
18680+
18681+2: ljmpq __KERNEL_CS,1f
18682+3: ljmpq __KERNEXEC_KERNEL_CS,4f
18683+4: SET_RDI_INTO_CR0
18684+ jmp 1b
18685+ENDPROC(pax_enter_kernel)
18686+
18687+ENTRY(pax_exit_kernel)
18688+ pushq %rdi
18689+
18690+#ifdef CONFIG_PARAVIRT
18691+ PV_SAVE_REGS(CLBR_RDI)
18692+#endif
18693+
18694+ mov %cs,%rdi
18695+ cmp $__KERNEXEC_KERNEL_CS,%edi
18696+ jz 2f
18697+1:
18698+
18699+#ifdef CONFIG_PARAVIRT
18700+ PV_RESTORE_REGS(CLBR_RDI);
18701+#endif
18702+
18703+ popq %rdi
18704+ pax_force_retaddr
18705+ retq
18706+
18707+2: GET_CR0_INTO_RDI
18708+ btr $16,%rdi
18709+ ljmpq __KERNEL_CS,3f
18710+3: SET_RDI_INTO_CR0
18711+ jmp 1b
18712+ENDPROC(pax_exit_kernel)
18713+#endif
18714+
18715+ .macro pax_enter_kernel_user
18716+ pax_set_fptr_mask
18717+#ifdef CONFIG_PAX_MEMORY_UDEREF
18718+ call pax_enter_kernel_user
18719+#endif
18720+ .endm
18721+
18722+ .macro pax_exit_kernel_user
18723+#ifdef CONFIG_PAX_MEMORY_UDEREF
18724+ call pax_exit_kernel_user
18725+#endif
18726+#ifdef CONFIG_PAX_RANDKSTACK
18727+ pushq %rax
18728+ call pax_randomize_kstack
18729+ popq %rax
18730+#endif
18731+ .endm
18732+
18733+#ifdef CONFIG_PAX_MEMORY_UDEREF
18734+ENTRY(pax_enter_kernel_user)
18735+ pushq %rdi
18736+ pushq %rbx
18737+
18738+#ifdef CONFIG_PARAVIRT
18739+ PV_SAVE_REGS(CLBR_RDI)
18740+#endif
18741+
18742+ GET_CR3_INTO_RDI
18743+ mov %rdi,%rbx
18744+ add $__START_KERNEL_map,%rbx
18745+ sub phys_base(%rip),%rbx
18746+
18747+#ifdef CONFIG_PARAVIRT
18748+ pushq %rdi
18749+ cmpl $0, pv_info+PARAVIRT_enabled
18750+ jz 1f
18751+ i = 0
18752+ .rept USER_PGD_PTRS
18753+ mov i*8(%rbx),%rsi
18754+ mov $0,%sil
18755+ lea i*8(%rbx),%rdi
18756+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
18757+ i = i + 1
18758+ .endr
18759+ jmp 2f
18760+1:
18761+#endif
18762+
18763+ i = 0
18764+ .rept USER_PGD_PTRS
18765+ movb $0,i*8(%rbx)
18766+ i = i + 1
18767+ .endr
18768+
18769+#ifdef CONFIG_PARAVIRT
18770+2: popq %rdi
18771+#endif
18772+ SET_RDI_INTO_CR3
18773+
18774+#ifdef CONFIG_PAX_KERNEXEC
18775+ GET_CR0_INTO_RDI
18776+ bts $16,%rdi
18777+ SET_RDI_INTO_CR0
18778+#endif
18779+
18780+#ifdef CONFIG_PARAVIRT
18781+ PV_RESTORE_REGS(CLBR_RDI)
18782+#endif
18783+
18784+ popq %rbx
18785+ popq %rdi
18786+ pax_force_retaddr
18787+ retq
18788+ENDPROC(pax_enter_kernel_user)
18789+
18790+ENTRY(pax_exit_kernel_user)
18791+ push %rdi
18792+
18793+#ifdef CONFIG_PARAVIRT
18794+ pushq %rbx
18795+ PV_SAVE_REGS(CLBR_RDI)
18796+#endif
18797+
18798+#ifdef CONFIG_PAX_KERNEXEC
18799+ GET_CR0_INTO_RDI
18800+ btr $16,%rdi
18801+ SET_RDI_INTO_CR0
18802+#endif
18803+
18804+ GET_CR3_INTO_RDI
18805+ add $__START_KERNEL_map,%rdi
18806+ sub phys_base(%rip),%rdi
18807+
18808+#ifdef CONFIG_PARAVIRT
18809+ cmpl $0, pv_info+PARAVIRT_enabled
18810+ jz 1f
18811+ mov %rdi,%rbx
18812+ i = 0
18813+ .rept USER_PGD_PTRS
18814+ mov i*8(%rbx),%rsi
18815+ mov $0x67,%sil
18816+ lea i*8(%rbx),%rdi
18817+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
18818+ i = i + 1
18819+ .endr
18820+ jmp 2f
18821+1:
18822+#endif
18823+
18824+ i = 0
18825+ .rept USER_PGD_PTRS
18826+ movb $0x67,i*8(%rdi)
18827+ i = i + 1
18828+ .endr
18829+
18830+#ifdef CONFIG_PARAVIRT
18831+2: PV_RESTORE_REGS(CLBR_RDI)
18832+ popq %rbx
18833+#endif
18834+
18835+ popq %rdi
18836+ pax_force_retaddr
18837+ retq
18838+ENDPROC(pax_exit_kernel_user)
18839+#endif
18840+
18841+.macro pax_erase_kstack
18842+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18843+ call pax_erase_kstack
18844+#endif
18845+.endm
18846+
18847+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18848+ENTRY(pax_erase_kstack)
18849+ pushq %rdi
18850+ pushq %rcx
18851+ pushq %rax
18852+ pushq %r11
18853+
18854+ GET_THREAD_INFO(%r11)
18855+ mov TI_lowest_stack(%r11), %rdi
18856+ mov $-0xBEEF, %rax
18857+ std
18858+
18859+1: mov %edi, %ecx
18860+ and $THREAD_SIZE_asm - 1, %ecx
18861+ shr $3, %ecx
18862+ repne scasq
18863+ jecxz 2f
18864+
18865+ cmp $2*8, %ecx
18866+ jc 2f
18867+
18868+ mov $2*8, %ecx
18869+ repe scasq
18870+ jecxz 2f
18871+ jne 1b
18872+
18873+2: cld
18874+ mov %esp, %ecx
18875+ sub %edi, %ecx
18876+
18877+ cmp $THREAD_SIZE_asm, %rcx
18878+ jb 3f
18879+ ud2
18880+3:
18881+
18882+ shr $3, %ecx
18883+ rep stosq
18884+
18885+ mov TI_task_thread_sp0(%r11), %rdi
18886+ sub $256, %rdi
18887+ mov %rdi, TI_lowest_stack(%r11)
18888+
18889+ popq %r11
18890+ popq %rax
18891+ popq %rcx
18892+ popq %rdi
18893+ pax_force_retaddr
18894+ ret
18895+ENDPROC(pax_erase_kstack)
18896+#endif
18897
18898 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
18899 #ifdef CONFIG_TRACE_IRQFLAGS
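
pax_enter_kernel_user and pax_exit_kernel_user above are the amd64 UDEREF half of kernel entry: the PGD's virtual address is recovered from CR3 (add __START_KERNEL_map, subtract phys_base), then the low byte of every user-space PGD slot is cleared on the way in, dropping the present bit so userland is simply unmapped while the kernel runs, and rewritten to 0x67 (present | rw | user | accessed | dirty) on the way out; the paravirt branch does the same through pv_mmu_ops.set_pgd_batched. A minimal C model of the flip, assuming a USER_PGD_PTRS value as defined by the patched headers:

#include <stdint.h>

#define USER_PGD_PTRS 256 /* illustrative: the PGD slots covering userland */

/* entry: hide userland, mirroring "movb $0, i*8(%rbx)" above */
static void uderef_enter(uint64_t *cpu_pgd)
{
        for (unsigned int i = 0; i < USER_PGD_PTRS; i++)
                cpu_pgd[i] &= ~0xffULL;        /* clears P/RW/US/A/D bits */
}

/* exit: map userland back in, mirroring "movb $0x67, i*8(%rdi)" above */
static void uderef_exit(uint64_t *cpu_pgd)
{
        for (unsigned int i = 0; i < USER_PGD_PTRS; i++)
                cpu_pgd[i] = (cpu_pgd[i] & ~0xffULL) | 0x67;
}
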
18900@@ -375,8 +651,8 @@ ENDPROC(native_usergs_sysret64)
18901 .endm
18902
18903 .macro UNFAKE_STACK_FRAME
18904- addq $8*6, %rsp
18905- CFI_ADJUST_CFA_OFFSET -(6*8)
18906+ addq $8*6 + ARG_SKIP, %rsp
18907+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
18908 .endm
18909
18910 /*
18911@@ -463,7 +739,7 @@ ENDPROC(native_usergs_sysret64)
18912 movq %rsp, %rsi
18913
18914 leaq -RBP(%rsp),%rdi /* arg1 for handler */
18915- testl $3, CS-RBP(%rsi)
18916+ testb $3, CS-RBP(%rsi)
18917 je 1f
18918 SWAPGS
18919 /*
18920@@ -498,9 +774,10 @@ ENTRY(save_rest)
18921 movq_cfi r15, R15+16
18922 movq %r11, 8(%rsp) /* return address */
18923 FIXUP_TOP_OF_STACK %r11, 16
18924+ pax_force_retaddr
18925 ret
18926 CFI_ENDPROC
18927-END(save_rest)
18928+ENDPROC(save_rest)
18929
18930 /* save complete stack frame */
18931 .pushsection .kprobes.text, "ax"
18932@@ -529,9 +806,10 @@ ENTRY(save_paranoid)
18933 js 1f /* negative -> in kernel */
18934 SWAPGS
18935 xorl %ebx,%ebx
18936-1: ret
18937+1: pax_force_retaddr_bts
18938+ ret
18939 CFI_ENDPROC
18940-END(save_paranoid)
18941+ENDPROC(save_paranoid)
18942 .popsection
18943
18944 /*
18945@@ -553,7 +831,7 @@ ENTRY(ret_from_fork)
18946
18947 RESTORE_REST
18948
18949- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
18950+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
18951 jz 1f
18952
18953 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
18954@@ -571,7 +849,7 @@ ENTRY(ret_from_fork)
18955 RESTORE_REST
18956 jmp int_ret_from_sys_call
18957 CFI_ENDPROC
18958-END(ret_from_fork)
18959+ENDPROC(ret_from_fork)
18960
18961 /*
18962 * System call entry. Up to 6 arguments in registers are supported.
18963@@ -608,7 +886,7 @@ END(ret_from_fork)
18964 ENTRY(system_call)
18965 CFI_STARTPROC simple
18966 CFI_SIGNAL_FRAME
18967- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
18968+ CFI_DEF_CFA rsp,0
18969 CFI_REGISTER rip,rcx
18970 /*CFI_REGISTER rflags,r11*/
18971 SWAPGS_UNSAFE_STACK
18972@@ -621,16 +899,23 @@ GLOBAL(system_call_after_swapgs)
18973
18974 movq %rsp,PER_CPU_VAR(old_rsp)
18975 movq PER_CPU_VAR(kernel_stack),%rsp
18976+ SAVE_ARGS 8*6,0
18977+ pax_enter_kernel_user
18978+
18979+#ifdef CONFIG_PAX_RANDKSTACK
18980+ pax_erase_kstack
18981+#endif
18982+
18983 /*
18984 * No need to follow this irqs off/on section - it's straight
18985 * and short:
18986 */
18987 ENABLE_INTERRUPTS(CLBR_NONE)
18988- SAVE_ARGS 8,0
18989 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
18990 movq %rcx,RIP-ARGOFFSET(%rsp)
18991 CFI_REL_OFFSET rip,RIP-ARGOFFSET
18992- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
18993+ GET_THREAD_INFO(%rcx)
18994+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
18995 jnz tracesys
18996 system_call_fastpath:
18997 #if __SYSCALL_MASK == ~0
18998@@ -640,7 +925,7 @@ system_call_fastpath:
18999 cmpl $__NR_syscall_max,%eax
19000 #endif
19001 ja badsys
19002- movq %r10,%rcx
19003+ movq R10-ARGOFFSET(%rsp),%rcx
19004 call *sys_call_table(,%rax,8) # XXX: rip relative
19005 movq %rax,RAX-ARGOFFSET(%rsp)
19006 /*
19007@@ -654,10 +939,13 @@ sysret_check:
19008 LOCKDEP_SYS_EXIT
19009 DISABLE_INTERRUPTS(CLBR_NONE)
19010 TRACE_IRQS_OFF
19011- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
19012+ GET_THREAD_INFO(%rcx)
19013+ movl TI_flags(%rcx),%edx
19014 andl %edi,%edx
19015 jnz sysret_careful
19016 CFI_REMEMBER_STATE
19017+ pax_exit_kernel_user
19018+ pax_erase_kstack
19019 /*
19020 * sysretq will re-enable interrupts:
19021 */
19022@@ -709,14 +997,18 @@ badsys:
19023 * jump back to the normal fast path.
19024 */
19025 auditsys:
19026- movq %r10,%r9 /* 6th arg: 4th syscall arg */
19027+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
19028 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
19029 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
19030 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
19031 movq %rax,%rsi /* 2nd arg: syscall number */
19032 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
19033 call __audit_syscall_entry
19034+
19035+ pax_erase_kstack
19036+
19037 LOAD_ARGS 0 /* reload call-clobbered registers */
19038+ pax_set_fptr_mask
19039 jmp system_call_fastpath
19040
19041 /*
19042@@ -737,7 +1029,7 @@ sysret_audit:
19043 /* Do syscall tracing */
19044 tracesys:
19045 #ifdef CONFIG_AUDITSYSCALL
19046- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
19047+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
19048 jz auditsys
19049 #endif
19050 SAVE_REST
19051@@ -745,12 +1037,16 @@ tracesys:
19052 FIXUP_TOP_OF_STACK %rdi
19053 movq %rsp,%rdi
19054 call syscall_trace_enter
19055+
19056+ pax_erase_kstack
19057+
19058 /*
19059 * Reload arg registers from stack in case ptrace changed them.
19060 * We don't reload %rax because syscall_trace_enter() returned
19061 * the value it wants us to use in the table lookup.
19062 */
19063 LOAD_ARGS ARGOFFSET, 1
19064+ pax_set_fptr_mask
19065 RESTORE_REST
19066 #if __SYSCALL_MASK == ~0
19067 cmpq $__NR_syscall_max,%rax
19068@@ -759,7 +1055,7 @@ tracesys:
19069 cmpl $__NR_syscall_max,%eax
19070 #endif
19071 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
19072- movq %r10,%rcx /* fixup for C */
19073+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
19074 call *sys_call_table(,%rax,8)
19075 movq %rax,RAX-ARGOFFSET(%rsp)
19076 /* Use IRET because user could have changed frame */
19077@@ -780,7 +1076,9 @@ GLOBAL(int_with_check)
19078 andl %edi,%edx
19079 jnz int_careful
19080 andl $~TS_COMPAT,TI_status(%rcx)
19081- jmp retint_swapgs
19082+ pax_exit_kernel_user
19083+ pax_erase_kstack
19084+ jmp retint_swapgs_pax
19085
19086 /* Either reschedule or signal or syscall exit tracking needed. */
19087 /* First do a reschedule test. */
19088@@ -826,7 +1124,7 @@ int_restore_rest:
19089 TRACE_IRQS_OFF
19090 jmp int_with_check
19091 CFI_ENDPROC
19092-END(system_call)
19093+ENDPROC(system_call)
19094
19095 /*
19096 * Certain special system calls that need to save a complete full stack frame.
19097@@ -842,7 +1140,7 @@ ENTRY(\label)
19098 call \func
19099 jmp ptregscall_common
19100 CFI_ENDPROC
19101-END(\label)
19102+ENDPROC(\label)
19103 .endm
19104
19105 .macro FORK_LIKE func
19106@@ -856,9 +1154,10 @@ ENTRY(stub_\func)
19107 DEFAULT_FRAME 0 8 /* offset 8: return address */
19108 call sys_\func
19109 RESTORE_TOP_OF_STACK %r11, 8
19110+ pax_force_retaddr
19111 ret $REST_SKIP /* pop extended registers */
19112 CFI_ENDPROC
19113-END(stub_\func)
19114+ENDPROC(stub_\func)
19115 .endm
19116
19117 FORK_LIKE clone
19118@@ -875,9 +1174,10 @@ ENTRY(ptregscall_common)
19119 movq_cfi_restore R12+8, r12
19120 movq_cfi_restore RBP+8, rbp
19121 movq_cfi_restore RBX+8, rbx
19122+ pax_force_retaddr
19123 ret $REST_SKIP /* pop extended registers */
19124 CFI_ENDPROC
19125-END(ptregscall_common)
19126+ENDPROC(ptregscall_common)
19127
19128 ENTRY(stub_execve)
19129 CFI_STARTPROC
19130@@ -891,7 +1191,7 @@ ENTRY(stub_execve)
19131 RESTORE_REST
19132 jmp int_ret_from_sys_call
19133 CFI_ENDPROC
19134-END(stub_execve)
19135+ENDPROC(stub_execve)
19136
19137 /*
19138 * sigreturn is special because it needs to restore all registers on return.
19139@@ -909,7 +1209,7 @@ ENTRY(stub_rt_sigreturn)
19140 RESTORE_REST
19141 jmp int_ret_from_sys_call
19142 CFI_ENDPROC
19143-END(stub_rt_sigreturn)
19144+ENDPROC(stub_rt_sigreturn)
19145
19146 #ifdef CONFIG_X86_X32_ABI
19147 ENTRY(stub_x32_rt_sigreturn)
19148@@ -975,7 +1275,7 @@ vector=vector+1
19149 2: jmp common_interrupt
19150 .endr
19151 CFI_ENDPROC
19152-END(irq_entries_start)
19153+ENDPROC(irq_entries_start)
19154
19155 .previous
19156 END(interrupt)
19157@@ -995,6 +1295,16 @@ END(interrupt)
19158 subq $ORIG_RAX-RBP, %rsp
19159 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
19160 SAVE_ARGS_IRQ
19161+#ifdef CONFIG_PAX_MEMORY_UDEREF
19162+ testb $3, CS(%rdi)
19163+ jnz 1f
19164+ pax_enter_kernel
19165+ jmp 2f
19166+1: pax_enter_kernel_user
19167+2:
19168+#else
19169+ pax_enter_kernel
19170+#endif
19171 call \func
19172 .endm
19173
19174@@ -1027,7 +1337,7 @@ ret_from_intr:
19175
19176 exit_intr:
19177 GET_THREAD_INFO(%rcx)
19178- testl $3,CS-ARGOFFSET(%rsp)
19179+ testb $3,CS-ARGOFFSET(%rsp)
19180 je retint_kernel
19181
19182 /* Interrupt came from user space */
19183@@ -1049,12 +1359,16 @@ retint_swapgs: /* return to user-space */
19184 * The iretq could re-enable interrupts:
19185 */
19186 DISABLE_INTERRUPTS(CLBR_ANY)
19187+ pax_exit_kernel_user
19188+retint_swapgs_pax:
19189 TRACE_IRQS_IRETQ
19190 SWAPGS
19191 jmp restore_args
19192
19193 retint_restore_args: /* return to kernel space */
19194 DISABLE_INTERRUPTS(CLBR_ANY)
19195+ pax_exit_kernel
19196+ pax_force_retaddr (RIP-ARGOFFSET)
19197 /*
19198 * The iretq could re-enable interrupts:
19199 */
19200@@ -1137,7 +1451,7 @@ ENTRY(retint_kernel)
19201 #endif
19202
19203 CFI_ENDPROC
19204-END(common_interrupt)
19205+ENDPROC(common_interrupt)
19206 /*
19207 * End of kprobes section
19208 */
19209@@ -1155,7 +1469,7 @@ ENTRY(\sym)
19210 interrupt \do_sym
19211 jmp ret_from_intr
19212 CFI_ENDPROC
19213-END(\sym)
19214+ENDPROC(\sym)
19215 .endm
19216
19217 #ifdef CONFIG_SMP
19218@@ -1211,12 +1525,22 @@ ENTRY(\sym)
19219 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19220 call error_entry
19221 DEFAULT_FRAME 0
19222+#ifdef CONFIG_PAX_MEMORY_UDEREF
19223+ testb $3, CS(%rsp)
19224+ jnz 1f
19225+ pax_enter_kernel
19226+ jmp 2f
19227+1: pax_enter_kernel_user
19228+2:
19229+#else
19230+ pax_enter_kernel
19231+#endif
19232 movq %rsp,%rdi /* pt_regs pointer */
19233 xorl %esi,%esi /* no error code */
19234 call \do_sym
19235 jmp error_exit /* %ebx: no swapgs flag */
19236 CFI_ENDPROC
19237-END(\sym)
19238+ENDPROC(\sym)
19239 .endm
19240
19241 .macro paranoidzeroentry sym do_sym
19242@@ -1229,15 +1553,25 @@ ENTRY(\sym)
19243 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19244 call save_paranoid
19245 TRACE_IRQS_OFF
19246+#ifdef CONFIG_PAX_MEMORY_UDEREF
19247+ testb $3, CS(%rsp)
19248+ jnz 1f
19249+ pax_enter_kernel
19250+ jmp 2f
19251+1: pax_enter_kernel_user
19252+2:
19253+#else
19254+ pax_enter_kernel
19255+#endif
19256 movq %rsp,%rdi /* pt_regs pointer */
19257 xorl %esi,%esi /* no error code */
19258 call \do_sym
19259 jmp paranoid_exit /* %ebx: no swapgs flag */
19260 CFI_ENDPROC
19261-END(\sym)
19262+ENDPROC(\sym)
19263 .endm
19264
19265-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
19266+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
19267 .macro paranoidzeroentry_ist sym do_sym ist
19268 ENTRY(\sym)
19269 INTR_FRAME
19270@@ -1248,14 +1582,30 @@ ENTRY(\sym)
19271 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19272 call save_paranoid
19273 TRACE_IRQS_OFF_DEBUG
19274+#ifdef CONFIG_PAX_MEMORY_UDEREF
19275+ testb $3, CS(%rsp)
19276+ jnz 1f
19277+ pax_enter_kernel
19278+ jmp 2f
19279+1: pax_enter_kernel_user
19280+2:
19281+#else
19282+ pax_enter_kernel
19283+#endif
19284 movq %rsp,%rdi /* pt_regs pointer */
19285 xorl %esi,%esi /* no error code */
19286+#ifdef CONFIG_SMP
19287+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
19288+ lea init_tss(%r12), %r12
19289+#else
19290+ lea init_tss(%rip), %r12
19291+#endif
19292 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19293 call \do_sym
19294 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19295 jmp paranoid_exit /* %ebx: no swapgs flag */
19296 CFI_ENDPROC
19297-END(\sym)
19298+ENDPROC(\sym)
19299 .endm
19300
19301 .macro errorentry sym do_sym
19302@@ -1267,13 +1617,23 @@ ENTRY(\sym)
19303 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19304 call error_entry
19305 DEFAULT_FRAME 0
19306+#ifdef CONFIG_PAX_MEMORY_UDEREF
19307+ testb $3, CS(%rsp)
19308+ jnz 1f
19309+ pax_enter_kernel
19310+ jmp 2f
19311+1: pax_enter_kernel_user
19312+2:
19313+#else
19314+ pax_enter_kernel
19315+#endif
19316 movq %rsp,%rdi /* pt_regs pointer */
19317 movq ORIG_RAX(%rsp),%rsi /* get error code */
19318 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
19319 call \do_sym
19320 jmp error_exit /* %ebx: no swapgs flag */
19321 CFI_ENDPROC
19322-END(\sym)
19323+ENDPROC(\sym)
19324 .endm
19325
19326 /* error code is on the stack already */
19327@@ -1287,13 +1647,23 @@ ENTRY(\sym)
19328 call save_paranoid
19329 DEFAULT_FRAME 0
19330 TRACE_IRQS_OFF
19331+#ifdef CONFIG_PAX_MEMORY_UDEREF
19332+ testb $3, CS(%rsp)
19333+ jnz 1f
19334+ pax_enter_kernel
19335+ jmp 2f
19336+1: pax_enter_kernel_user
19337+2:
19338+#else
19339+ pax_enter_kernel
19340+#endif
19341 movq %rsp,%rdi /* pt_regs pointer */
19342 movq ORIG_RAX(%rsp),%rsi /* get error code */
19343 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
19344 call \do_sym
19345 jmp paranoid_exit /* %ebx: no swapgs flag */
19346 CFI_ENDPROC
19347-END(\sym)
19348+ENDPROC(\sym)
19349 .endm
19350
19351 zeroentry divide_error do_divide_error
19352@@ -1323,9 +1693,10 @@ gs_change:
19353 2: mfence /* workaround */
19354 SWAPGS
19355 popfq_cfi
19356+ pax_force_retaddr
19357 ret
19358 CFI_ENDPROC
19359-END(native_load_gs_index)
19360+ENDPROC(native_load_gs_index)
19361
19362 _ASM_EXTABLE(gs_change,bad_gs)
19363 .section .fixup,"ax"
19364@@ -1353,9 +1724,10 @@ ENTRY(call_softirq)
19365 CFI_DEF_CFA_REGISTER rsp
19366 CFI_ADJUST_CFA_OFFSET -8
19367 decl PER_CPU_VAR(irq_count)
19368+ pax_force_retaddr
19369 ret
19370 CFI_ENDPROC
19371-END(call_softirq)
19372+ENDPROC(call_softirq)
19373
19374 #ifdef CONFIG_XEN
19375 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
19376@@ -1393,7 +1765,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
19377 decl PER_CPU_VAR(irq_count)
19378 jmp error_exit
19379 CFI_ENDPROC
19380-END(xen_do_hypervisor_callback)
19381+ENDPROC(xen_do_hypervisor_callback)
19382
19383 /*
19384 * Hypervisor uses this for application faults while it executes.
19385@@ -1452,7 +1824,7 @@ ENTRY(xen_failsafe_callback)
19386 SAVE_ALL
19387 jmp error_exit
19388 CFI_ENDPROC
19389-END(xen_failsafe_callback)
19390+ENDPROC(xen_failsafe_callback)
19391
19392 apicinterrupt XEN_HVM_EVTCHN_CALLBACK \
19393 xen_hvm_callback_vector xen_evtchn_do_upcall
19394@@ -1501,16 +1873,31 @@ ENTRY(paranoid_exit)
19395 TRACE_IRQS_OFF_DEBUG
19396 testl %ebx,%ebx /* swapgs needed? */
19397 jnz paranoid_restore
19398- testl $3,CS(%rsp)
19399+ testb $3,CS(%rsp)
19400 jnz paranoid_userspace
19401+#ifdef CONFIG_PAX_MEMORY_UDEREF
19402+ pax_exit_kernel
19403+ TRACE_IRQS_IRETQ 0
19404+ SWAPGS_UNSAFE_STACK
19405+ RESTORE_ALL 8
19406+ pax_force_retaddr_bts
19407+ jmp irq_return
19408+#endif
19409 paranoid_swapgs:
19410+#ifdef CONFIG_PAX_MEMORY_UDEREF
19411+ pax_exit_kernel_user
19412+#else
19413+ pax_exit_kernel
19414+#endif
19415 TRACE_IRQS_IRETQ 0
19416 SWAPGS_UNSAFE_STACK
19417 RESTORE_ALL 8
19418 jmp irq_return
19419 paranoid_restore:
19420+ pax_exit_kernel
19421 TRACE_IRQS_IRETQ_DEBUG 0
19422 RESTORE_ALL 8
19423+ pax_force_retaddr_bts
19424 jmp irq_return
19425 paranoid_userspace:
19426 GET_THREAD_INFO(%rcx)
19427@@ -1539,7 +1926,7 @@ paranoid_schedule:
19428 TRACE_IRQS_OFF
19429 jmp paranoid_userspace
19430 CFI_ENDPROC
19431-END(paranoid_exit)
19432+ENDPROC(paranoid_exit)
19433
19434 /*
19435 * Exception entry point. This expects an error code/orig_rax on the stack.
19436@@ -1566,12 +1953,13 @@ ENTRY(error_entry)
19437 movq_cfi r14, R14+8
19438 movq_cfi r15, R15+8
19439 xorl %ebx,%ebx
19440- testl $3,CS+8(%rsp)
19441+ testb $3,CS+8(%rsp)
19442 je error_kernelspace
19443 error_swapgs:
19444 SWAPGS
19445 error_sti:
19446 TRACE_IRQS_OFF
19447+ pax_force_retaddr_bts
19448 ret
19449
19450 /*
19451@@ -1598,7 +1986,7 @@ bstep_iret:
19452 movq %rcx,RIP+8(%rsp)
19453 jmp error_swapgs
19454 CFI_ENDPROC
19455-END(error_entry)
19456+ENDPROC(error_entry)
19457
19458
19459 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
19460@@ -1618,7 +2006,7 @@ ENTRY(error_exit)
19461 jnz retint_careful
19462 jmp retint_swapgs
19463 CFI_ENDPROC
19464-END(error_exit)
19465+ENDPROC(error_exit)
19466
19467 /*
19468 * Test if a given stack is an NMI stack or not.
19469@@ -1676,9 +2064,11 @@ ENTRY(nmi)
19470 * If %cs was not the kernel segment, then the NMI triggered in user
19471 * space, which means it is definitely not nested.
19472 */
19473+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
19474+ je 1f
19475 cmpl $__KERNEL_CS, 16(%rsp)
19476 jne first_nmi
19477-
19478+1:
19479 /*
19480 * Check the special variable on the stack to see if NMIs are
19481 * executing.
19482@@ -1847,6 +2237,17 @@ end_repeat_nmi:
19483 */
19484 movq %cr2, %r12
19485
19486+#ifdef CONFIG_PAX_MEMORY_UDEREF
19487+ testb $3, CS(%rsp)
19488+ jnz 1f
19489+ pax_enter_kernel
19490+ jmp 2f
19491+1: pax_enter_kernel_user
19492+2:
19493+#else
19494+ pax_enter_kernel
19495+#endif
19496+
19497 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
19498 movq %rsp,%rdi
19499 movq $-1,%rsi
19500@@ -1862,23 +2263,34 @@ end_repeat_nmi:
19501 testl %ebx,%ebx /* swapgs needed? */
19502 jnz nmi_restore
19503 nmi_swapgs:
19504+#ifdef CONFIG_PAX_MEMORY_UDEREF
19505+ pax_exit_kernel_user
19506+#else
19507+ pax_exit_kernel
19508+#endif
19509 SWAPGS_UNSAFE_STACK
19510+ RESTORE_ALL 6*8
19511+ /* Clear the NMI executing stack variable */
19512+ movq $0, 5*8(%rsp)
19513+ jmp irq_return
19514 nmi_restore:
19515+ pax_exit_kernel
19516 /* Pop the extra iret frame at once */
19517 RESTORE_ALL 6*8
19518+ pax_force_retaddr_bts
19519
19520 /* Clear the NMI executing stack variable */
19521 movq $0, 5*8(%rsp)
19522 jmp irq_return
19523 CFI_ENDPROC
19524-END(nmi)
19525+ENDPROC(nmi)
19526
19527 ENTRY(ignore_sysret)
19528 CFI_STARTPROC
19529 mov $-ENOSYS,%eax
19530 sysret
19531 CFI_ENDPROC
19532-END(ignore_sysret)
19533+ENDPROC(ignore_sysret)
19534
19535 /*
19536 * End of kprobes section
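
The 64-bit entry code repeats the 32-bit pattern: ENDPROC() annotations throughout, pax_enter_kernel/pax_exit_kernel around kernel-mode paths (under KERNEXEC, forcing CR0.WP on and bouncing through __KERNEXEC_KERNEL_CS), pax_force_retaddr sanitizing return addresses popped off the stack, and pax_erase_kstack hooked in after syscall tracing and before returning to userland. The STACKLEAK eraser scans downward from TI_lowest_stack for a run of sixteen poison qwords, i.e. the still-untouched part of the stack, and re-poisons everything between that point and the live %rsp. A simplified C rendering of the loop; the constants are the ones loaded in the assembly above, and the boundary handling is approximate:

#include <stddef.h>
#include <stdint.h>

#define KSTACK_POISON 0xFFFFFFFFFFFF4111ULL /* the "mov $-0xBEEF, %rax" value */
#define POISON_RUN    16                    /* the "$2*8" repe scasq count */

/* bottom: lowest address of the thread stack; lowest: deepest %rsp seen
 * since the last erase (TI_lowest_stack); sp: the current stack pointer. */
static void erase_kstack(uint64_t *bottom, uint64_t *lowest, uint64_t *sp)
{
        uint64_t *p = lowest;
        size_t run = 0;

        /* std; repne/repe scasq: scan down for POISON_RUN consecutive
         * poison qwords */
        while (p > bottom && run < POISON_RUN) {
                if (*--p == KSTACK_POISON)
                        run++;
                else
                        run = 0;
        }

        /* cld; rep stosq: re-poison up to the live stack pointer */
        while (p < sp)
                *p++ = KSTACK_POISON;

        /* the assembly then resets TI_lowest_stack to thread sp0 - 256 */
}
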
19537diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
19538index 1d41402..af9a46a 100644
19539--- a/arch/x86/kernel/ftrace.c
19540+++ b/arch/x86/kernel/ftrace.c
19541@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
19542 {
19543 unsigned char replaced[MCOUNT_INSN_SIZE];
19544
19545+ ip = ktla_ktva(ip);
19546+
19547 /*
19548 * Note: Due to modules and __init, code can
19549 * disappear and change, we need to protect against faulting
19550@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
19551 unsigned char old[MCOUNT_INSN_SIZE], *new;
19552 int ret;
19553
19554- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
19555+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
19556 new = ftrace_call_replace(ip, (unsigned long)func);
19557
19558 /* See comment above by declaration of modifying_ftrace_code */
19559@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
19560 /* Also update the regs callback function */
19561 if (!ret) {
19562 ip = (unsigned long)(&ftrace_regs_call);
19563- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
19564+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
19565 new = ftrace_call_replace(ip, (unsigned long)func);
19566 ret = ftrace_modify_code(ip, old, new);
19567 }
19568@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
19569 * kernel identity mapping to modify code.
19570 */
19571 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
19572- ip = (unsigned long)__va(__pa(ip));
19573+ ip = (unsigned long)__va(__pa(ktla_ktva(ip)));
19574
19575 return probe_kernel_write((void *)ip, val, size);
19576 }
19577@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
19578 unsigned char replaced[MCOUNT_INSN_SIZE];
19579 unsigned char brk = BREAKPOINT_INSTRUCTION;
19580
19581- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
19582+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
19583 return -EFAULT;
19584
19585 /* Make sure it is what we expect it to be */
19586@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
19587 return ret;
19588
19589 fail_update:
19590- probe_kernel_write((void *)ip, &old_code[0], 1);
19591+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
19592 goto out;
19593 }
19594
19595@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
19596 {
19597 unsigned char code[MCOUNT_INSN_SIZE];
19598
19599+ ip = ktla_ktva(ip);
19600+
19601 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
19602 return -EFAULT;
19603
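
Every ftrace site that reads or patches kernel text now passes the address through ktla_ktva() first. Under KERNEXEC the kernel code segment is rebased (see the head_32.S GDT changes below), so the link-time address of a text symbol such as ftrace_call is not the address at which its bytes are reachable through %ds; ktla_ktva() bridges the two and collapses to the identity on kernels built without KERNEXEC. A conceptual sketch only, since the real helper comes from the patched asm/pgtable.h and the offset name below is a stand-in:

/* Hypothetical illustration, not the patch's actual definition. */
#ifdef CONFIG_PAX_KERNEXEC
extern unsigned long kernexec_text_shift;  /* stand-in for the real offset */
# define ktla_ktva(addr) ((addr) + kernexec_text_shift)
#else
# define ktla_ktva(addr) (addr)            /* identity without KERNEXEC */
#endif
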
19604diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
19605index c18f59d..9c0c9f6 100644
19606--- a/arch/x86/kernel/head32.c
19607+++ b/arch/x86/kernel/head32.c
19608@@ -18,6 +18,7 @@
19609 #include <asm/io_apic.h>
19610 #include <asm/bios_ebda.h>
19611 #include <asm/tlbflush.h>
19612+#include <asm/boot.h>
19613
19614 static void __init i386_default_early_setup(void)
19615 {
19616@@ -30,8 +31,7 @@ static void __init i386_default_early_setup(void)
19617
19618 void __init i386_start_kernel(void)
19619 {
19620- memblock_reserve(__pa_symbol(&_text),
19621- __pa_symbol(&__bss_stop) - __pa_symbol(&_text));
19622+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
19623
19624 #ifdef CONFIG_BLK_DEV_INITRD
19625 /* Reserve INITRD */
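
The reservation in i386_start_kernel() is anchored at LOAD_PHYSICAL_ADDR instead of __pa_symbol(&_text): with the KERNEXEC changes to head_32.S below, an int3-filled guard page precedes the regular text, so the physical address of _text is no longer guaranteed to equal the load address. Reserving from the load address keeps the whole image covered either way. A sketch with example values, not taken from a real boot:

/* Suppose the image is loaded at LOAD_PHYSICAL_ADDR = 0x1000000 and,
 * under KERNEXEC, _text sits one guard page in at 0x1001000. Reserving
 * from __pa_symbol(&_text) would leave that first page unreserved;
 * reserving from the load address, as the patched call does, covers it:
 */
memblock_reserve(LOAD_PHYSICAL_ADDR,
                 __pa_symbol(&__bss_stop) - LOAD_PHYSICAL_ADDR);
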
19626diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
19627index c8932c7..d56b622 100644
19628--- a/arch/x86/kernel/head_32.S
19629+++ b/arch/x86/kernel/head_32.S
19630@@ -26,6 +26,12 @@
19631 /* Physical address */
19632 #define pa(X) ((X) - __PAGE_OFFSET)
19633
19634+#ifdef CONFIG_PAX_KERNEXEC
19635+#define ta(X) (X)
19636+#else
19637+#define ta(X) ((X) - __PAGE_OFFSET)
19638+#endif
19639+
19640 /*
19641 * References to members of the new_cpu_data structure.
19642 */
19643@@ -55,11 +61,7 @@
19644 * and small than max_low_pfn, otherwise will waste some page table entries
19645 */
19646
19647-#if PTRS_PER_PMD > 1
19648-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
19649-#else
19650-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
19651-#endif
19652+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
19653
19654 /* Number of possible pages in the lowmem region */
19655 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
19656@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
19657 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
19658
19659 /*
19660+ * Real beginning of normal "text" segment
19661+ */
19662+ENTRY(stext)
19663+ENTRY(_stext)
19664+
19665+/*
19666 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
19667 * %esi points to the real-mode code as a 32-bit pointer.
19668 * CS and DS must be 4 GB flat segments, but we don't depend on
19669@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
19670 * can.
19671 */
19672 __HEAD
19673+
19674+#ifdef CONFIG_PAX_KERNEXEC
19675+ jmp startup_32
19676+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
19677+.fill PAGE_SIZE-5,1,0xcc
19678+#endif
19679+
19680 ENTRY(startup_32)
19681 movl pa(stack_start),%ecx
19682
19683@@ -106,6 +121,59 @@ ENTRY(startup_32)
19684 2:
19685 leal -__PAGE_OFFSET(%ecx),%esp
19686
19687+#ifdef CONFIG_SMP
19688+ movl $pa(cpu_gdt_table),%edi
19689+ movl $__per_cpu_load,%eax
19690+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
19691+ rorl $16,%eax
19692+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
19693+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
19694+ movl $__per_cpu_end - 1,%eax
19695+ subl $__per_cpu_start,%eax
19696+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
19697+#endif
19698+
19699+#ifdef CONFIG_PAX_MEMORY_UDEREF
19700+ movl $NR_CPUS,%ecx
19701+ movl $pa(cpu_gdt_table),%edi
19702+1:
19703+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
19704+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
19705+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
19706+ addl $PAGE_SIZE_asm,%edi
19707+ loop 1b
19708+#endif
19709+
19710+#ifdef CONFIG_PAX_KERNEXEC
19711+ movl $pa(boot_gdt),%edi
19712+ movl $__LOAD_PHYSICAL_ADDR,%eax
19713+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
19714+ rorl $16,%eax
19715+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
19716+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
19717+ rorl $16,%eax
19718+
19719+ ljmp $(__BOOT_CS),$1f
19720+1:
19721+
19722+ movl $NR_CPUS,%ecx
19723+ movl $pa(cpu_gdt_table),%edi
19724+ addl $__PAGE_OFFSET,%eax
19725+1:
19726+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
19727+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
19728+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
19729+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
19730+ rorl $16,%eax
19731+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
19732+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
19733+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
19734+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
19735+ rorl $16,%eax
19736+ addl $PAGE_SIZE_asm,%edi
19737+ loop 1b
19738+#endif
19739+
19740 /*
19741 * Clear BSS first so that there are no surprises...
19742 */
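
The CONFIG_PAX_MEMORY_UDEREF loop above rewrites only the high dword (byte offset 4) of three descriptors in every CPU's GDT page; the low dword keeps limit[15:0] = 0xffff and base 0 from the static table further down. The effect, assuming the default __PAGE_OFFSET of 0xC0000000: the kernel data segment becomes expand-down (access byte 0x97), valid only from __PAGE_OFFSET upward, while the user code and data segments keep base 0 but have their limit cut at __PAGE_OFFSET, so neither side's segments reach the other's addresses. A worked computation:

#include <stdint.h>
#include <stdio.h>

#define PAGE_OFFSET 0xC0000000u /* the default 3G/1G split assumed here */

int main(void)
{
        /* bits 16..19 of the high dword hold limit[19:16] */
        uint32_t limit_bits = ((PAGE_OFFSET - 1) & 0xf0000000u) >> 12;

        uint32_t kernel_ds = limit_bits | 0x00c09700u; /* expand-down data */
        uint32_t user_cs   = limit_bits | 0x00c0fb00u; /* DPL3 code */
        uint32_t user_ds   = limit_bits | 0x00c0f300u; /* DPL3 data */

        /* limit_bits already sits at bit 16; with limit[15:0] = 0xffff
         * and G=1 the limit is 0xbffff pages, exactly PAGE_OFFSET bytes */
        uint32_t limit_pages = limit_bits | 0xffffu;

        printf("%#x %#x %#x limit=%#x pages\n",
               kernel_ds, user_cs, user_ds, limit_pages);
        return 0;
}
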
19743@@ -196,8 +264,11 @@ ENTRY(startup_32)
19744 movl %eax, pa(max_pfn_mapped)
19745
19746 /* Do early initialization of the fixmap area */
19747- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
19748- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
19749+#ifdef CONFIG_COMPAT_VDSO
19750+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
19751+#else
19752+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
19753+#endif
19754 #else /* Not PAE */
19755
19756 page_pde_offset = (__PAGE_OFFSET >> 20);
19757@@ -227,8 +298,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
19758 movl %eax, pa(max_pfn_mapped)
19759
19760 /* Do early initialization of the fixmap area */
19761- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
19762- movl %eax,pa(initial_page_table+0xffc)
19763+#ifdef CONFIG_COMPAT_VDSO
19764+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
19765+#else
19766+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
19767+#endif
19768 #endif
19769
19770 #ifdef CONFIG_PARAVIRT
19771@@ -242,9 +316,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
19772 cmpl $num_subarch_entries, %eax
19773 jae bad_subarch
19774
19775- movl pa(subarch_entries)(,%eax,4), %eax
19776- subl $__PAGE_OFFSET, %eax
19777- jmp *%eax
19778+ jmp *pa(subarch_entries)(,%eax,4)
19779
19780 bad_subarch:
19781 WEAK(lguest_entry)
19782@@ -256,10 +328,10 @@ WEAK(xen_entry)
19783 __INITDATA
19784
19785 subarch_entries:
19786- .long default_entry /* normal x86/PC */
19787- .long lguest_entry /* lguest hypervisor */
19788- .long xen_entry /* Xen hypervisor */
19789- .long default_entry /* Moorestown MID */
19790+ .long ta(default_entry) /* normal x86/PC */
19791+ .long ta(lguest_entry) /* lguest hypervisor */
19792+ .long ta(xen_entry) /* Xen hypervisor */
19793+ .long ta(default_entry) /* Moorestown MID */
19794 num_subarch_entries = (. - subarch_entries) / 4
19795 .previous
19796 #else
19797@@ -335,6 +407,7 @@ default_entry:
19798 movl pa(mmu_cr4_features),%eax
19799 movl %eax,%cr4
19800
19801+#ifdef CONFIG_X86_PAE
19802 testb $X86_CR4_PAE, %al # check if PAE is enabled
19803 jz 6f
19804
19805@@ -363,6 +436,9 @@ default_entry:
19806 /* Make changes effective */
19807 wrmsr
19808
19809+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
19810+#endif
19811+
19812 6:
19813
19814 /*
19815@@ -460,14 +536,20 @@ is386: movl $2,%ecx # set MP
19816 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
19817 movl %eax,%ss # after changing gdt.
19818
19819- movl $(__USER_DS),%eax # DS/ES contains default USER segment
19820+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
19821 movl %eax,%ds
19822 movl %eax,%es
19823
19824 movl $(__KERNEL_PERCPU), %eax
19825 movl %eax,%fs # set this cpu's percpu
19826
19827+#ifdef CONFIG_CC_STACKPROTECTOR
19828 movl $(__KERNEL_STACK_CANARY),%eax
19829+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
19830+ movl $(__USER_DS),%eax
19831+#else
19832+ xorl %eax,%eax
19833+#endif
19834 movl %eax,%gs
19835
19836 xorl %eax,%eax # Clear LDT
19837@@ -544,8 +626,11 @@ setup_once:
19838 * relocation. Manually set base address in stack canary
19839 * segment descriptor.
19840 */
19841- movl $gdt_page,%eax
19842+ movl $cpu_gdt_table,%eax
19843 movl $stack_canary,%ecx
19844+#ifdef CONFIG_SMP
19845+ addl $__per_cpu_load,%ecx
19846+#endif
19847 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
19848 shrl $16, %ecx
19849 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
19850@@ -576,7 +661,7 @@ ENDPROC(early_idt_handlers)
19851 /* This is global to keep gas from relaxing the jumps */
19852 ENTRY(early_idt_handler)
19853 cld
19854- cmpl $2,%ss:early_recursion_flag
19855+ cmpl $1,%ss:early_recursion_flag
19856 je hlt_loop
19857 incl %ss:early_recursion_flag
19858
19859@@ -614,8 +699,8 @@ ENTRY(early_idt_handler)
19860 pushl (20+6*4)(%esp) /* trapno */
19861 pushl $fault_msg
19862 call printk
19863-#endif
19864 call dump_stack
19865+#endif
19866 hlt_loop:
19867 hlt
19868 jmp hlt_loop
19869@@ -634,8 +719,11 @@ ENDPROC(early_idt_handler)
19870 /* This is the default interrupt "handler" :-) */
19871 ALIGN
19872 ignore_int:
19873- cld
19874 #ifdef CONFIG_PRINTK
19875+ cmpl $2,%ss:early_recursion_flag
19876+ je hlt_loop
19877+ incl %ss:early_recursion_flag
19878+ cld
19879 pushl %eax
19880 pushl %ecx
19881 pushl %edx
19882@@ -644,9 +732,6 @@ ignore_int:
19883 movl $(__KERNEL_DS),%eax
19884 movl %eax,%ds
19885 movl %eax,%es
19886- cmpl $2,early_recursion_flag
19887- je hlt_loop
19888- incl early_recursion_flag
19889 pushl 16(%esp)
19890 pushl 24(%esp)
19891 pushl 32(%esp)
19892@@ -680,29 +765,43 @@ ENTRY(setup_once_ref)
19893 /*
19894 * BSS section
19895 */
19896-__PAGE_ALIGNED_BSS
19897- .align PAGE_SIZE
19898 #ifdef CONFIG_X86_PAE
19899+.section .initial_pg_pmd,"a",@progbits
19900 initial_pg_pmd:
19901 .fill 1024*KPMDS,4,0
19902 #else
19903+.section .initial_page_table,"a",@progbits
19904 ENTRY(initial_page_table)
19905 .fill 1024,4,0
19906 #endif
19907+.section .initial_pg_fixmap,"a",@progbits
19908 initial_pg_fixmap:
19909 .fill 1024,4,0
19910+.section .empty_zero_page,"a",@progbits
19911 ENTRY(empty_zero_page)
19912 .fill 4096,1,0
19913+.section .swapper_pg_dir,"a",@progbits
19914 ENTRY(swapper_pg_dir)
19915+#ifdef CONFIG_X86_PAE
19916+ .fill 4,8,0
19917+#else
19918 .fill 1024,4,0
19919+#endif
19920+
19921+/*
19922+ * The IDT has to be page-aligned to simplify the Pentium
19923+ * F0 0F bug workaround.. We have a special link segment
19924+ * for this.
19925+ */
19926+.section .idt,"a",@progbits
19927+ENTRY(idt_table)
19928+ .fill 256,8,0
19929
19930 /*
19931 * This starts the data section.
19932 */
19933 #ifdef CONFIG_X86_PAE
19934-__PAGE_ALIGNED_DATA
19935- /* Page-aligned for the benefit of paravirt? */
19936- .align PAGE_SIZE
19937+.section .initial_page_table,"a",@progbits
19938 ENTRY(initial_page_table)
19939 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
19940 # if KPMDS == 3
19941@@ -721,12 +820,20 @@ ENTRY(initial_page_table)
19942 # error "Kernel PMDs should be 1, 2 or 3"
19943 # endif
19944 .align PAGE_SIZE /* needs to be page-sized too */
19945+
19946+#ifdef CONFIG_PAX_PER_CPU_PGD
19947+ENTRY(cpu_pgd)
19948+ .rept NR_CPUS
19949+ .fill 4,8,0
19950+ .endr
19951+#endif
19952+
19953 #endif
19954
19955 .data
19956 .balign 4
19957 ENTRY(stack_start)
19958- .long init_thread_union+THREAD_SIZE
19959+ .long init_thread_union+THREAD_SIZE-8
19960
19961 __INITRODATA
19962 int_msg:
19963@@ -754,7 +861,7 @@ fault_msg:
19964 * segment size, and 32-bit linear address value:
19965 */
19966
19967- .data
19968+.section .rodata,"a",@progbits
19969 .globl boot_gdt_descr
19970 .globl idt_descr
19971
19972@@ -763,7 +870,7 @@ fault_msg:
19973 .word 0 # 32 bit align gdt_desc.address
19974 boot_gdt_descr:
19975 .word __BOOT_DS+7
19976- .long boot_gdt - __PAGE_OFFSET
19977+ .long pa(boot_gdt)
19978
19979 .word 0 # 32-bit align idt_desc.address
19980 idt_descr:
19981@@ -774,7 +881,7 @@ idt_descr:
19982 .word 0 # 32 bit align gdt_desc.address
19983 ENTRY(early_gdt_descr)
19984 .word GDT_ENTRIES*8-1
19985- .long gdt_page /* Overwritten for secondary CPUs */
19986+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
19987
19988 /*
19989 * The boot_gdt must mirror the equivalent in setup.S and is
19990@@ -783,5 +890,65 @@ ENTRY(early_gdt_descr)
19991 .align L1_CACHE_BYTES
19992 ENTRY(boot_gdt)
19993 .fill GDT_ENTRY_BOOT_CS,8,0
19994- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
19995- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
19996+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
19997+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
19998+
19999+ .align PAGE_SIZE_asm
20000+ENTRY(cpu_gdt_table)
20001+ .rept NR_CPUS
20002+ .quad 0x0000000000000000 /* NULL descriptor */
20003+ .quad 0x0000000000000000 /* 0x0b reserved */
20004+ .quad 0x0000000000000000 /* 0x13 reserved */
20005+ .quad 0x0000000000000000 /* 0x1b reserved */
20006+
20007+#ifdef CONFIG_PAX_KERNEXEC
20008+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
20009+#else
20010+ .quad 0x0000000000000000 /* 0x20 unused */
20011+#endif
20012+
20013+ .quad 0x0000000000000000 /* 0x28 unused */
20014+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
20015+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
20016+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
20017+ .quad 0x0000000000000000 /* 0x4b reserved */
20018+ .quad 0x0000000000000000 /* 0x53 reserved */
20019+ .quad 0x0000000000000000 /* 0x5b reserved */
20020+
20021+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
20022+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
20023+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
20024+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
20025+
20026+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
20027+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
20028+
20029+ /*
20030+ * Segments used for calling PnP BIOS have byte granularity.
20031+ * The code segments and data segments have fixed 64k limits,
20032+ * the transfer segment sizes are set at run time.
20033+ */
20034+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
20035+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
20036+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
20037+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
20038+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
20039+
20040+ /*
20041+ * The APM segments have byte granularity and their bases
20042+ * are set at run time. All have 64k limits.
20043+ */
20044+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
20045+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
20046+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
20047+
20048+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
20049+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
20050+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
20051+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
20052+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
20053+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
20054+
20055+ /* Be sure this is zeroed to avoid false validations in Xen */
20056+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
20057+ .endr
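
head_32.S now carries the GDT itself: the per-CPU gdt_page is replaced by cpu_gdt_table, a page-aligned array with one full page per CPU, KERNEXEC adds an alternate kernel code segment in the 0x20 slot, and the PnP BIOS and APM descriptors move in alongside the usual kernel and user segments. Each .quad above packs one 8-byte segment descriptor; a small decoder (layout per the Intel SDM, nothing patch-specific) shows how to read entries such as the kernel code segment:

#include <stdint.h>
#include <stdio.h>

struct seg_desc {
        uint32_t base;
        uint32_t limit;  /* bytes if G=0, 4 KiB pages if G=1 */
        uint8_t  access; /* P, DPL, S, type */
        uint8_t  flags;  /* G, D/B, L, AVL */
};

static struct seg_desc decode_desc(uint64_t d)
{
        struct seg_desc s;

        s.limit  = (uint32_t)(d & 0xffff) | (uint32_t)((d >> 48) & 0xf) << 16;
        s.base   = (uint32_t)((d >> 16) & 0xffffff) |
                   (uint32_t)((d >> 56) & 0xff) << 24;
        s.access = (uint8_t)(d >> 40);
        s.flags  = (uint8_t)((d >> 52) & 0xf);
        return s;
}

int main(void)
{
        /* "kernel 4GB code at 0x00000000" from cpu_gdt_table above */
        struct seg_desc s = decode_desc(0x00cf9b000000ffffULL);

        printf("base=%#x limit=%#x access=%#x flags=%#x\n",
               s.base, s.limit, s.access, s.flags);
        /* base=0 limit=0xfffff access=0x9b flags=0xc: G=1 so the limit
         * is 0xfffff pages (4 GiB); 0x9b = present, DPL0, execute/read
         * code, accessed */
        return 0;
}
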
20058diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
20059index 980053c..74d3b44 100644
20060--- a/arch/x86/kernel/head_64.S
20061+++ b/arch/x86/kernel/head_64.S
20062@@ -20,6 +20,8 @@
20063 #include <asm/processor-flags.h>
20064 #include <asm/percpu.h>
20065 #include <asm/nops.h>
20066+#include <asm/cpufeature.h>
20067+#include <asm/alternative-asm.h>
20068
20069 #ifdef CONFIG_PARAVIRT
20070 #include <asm/asm-offsets.h>
20071@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
20072 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
20073 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
20074 L3_START_KERNEL = pud_index(__START_KERNEL_map)
20075+L4_VMALLOC_START = pgd_index(VMALLOC_START)
20076+L3_VMALLOC_START = pud_index(VMALLOC_START)
20077+L4_VMALLOC_END = pgd_index(VMALLOC_END)
20078+L3_VMALLOC_END = pud_index(VMALLOC_END)
20079+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
20080+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
20081
20082 .text
20083 __HEAD
20084@@ -88,35 +96,23 @@ startup_64:
20085 */
20086 addq %rbp, init_level4_pgt + 0(%rip)
20087 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
20088+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
20089+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
20090+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
20091 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
20092
20093 addq %rbp, level3_ident_pgt + 0(%rip)
20094+#ifndef CONFIG_XEN
20095+ addq %rbp, level3_ident_pgt + 8(%rip)
20096+#endif
20097
20098- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
20099- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
20100+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
20101+
20102+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
20103+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
20104
20105 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
20106-
20107- /* Add an Identity mapping if I am above 1G */
20108- leaq _text(%rip), %rdi
20109- andq $PMD_PAGE_MASK, %rdi
20110-
20111- movq %rdi, %rax
20112- shrq $PUD_SHIFT, %rax
20113- andq $(PTRS_PER_PUD - 1), %rax
20114- jz ident_complete
20115-
20116- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
20117- leaq level3_ident_pgt(%rip), %rbx
20118- movq %rdx, 0(%rbx, %rax, 8)
20119-
20120- movq %rdi, %rax
20121- shrq $PMD_SHIFT, %rax
20122- andq $(PTRS_PER_PMD - 1), %rax
20123- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
20124- leaq level2_spare_pgt(%rip), %rbx
20125- movq %rdx, 0(%rbx, %rax, 8)
20126-ident_complete:
20127+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
20128
20129 /*
20130 * Fixup the kernel text+data virtual addresses. Note that
20131@@ -159,8 +155,8 @@ ENTRY(secondary_startup_64)
20132 * after the boot processor executes this code.
20133 */
20134
20135- /* Enable PAE mode and PGE */
20136- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
20137+ /* Enable PAE mode and PSE/PGE */
20138+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
20139 movq %rax, %cr4
20140
20141 /* Setup early boot stage 4 level pagetables. */
20142@@ -182,9 +178,17 @@ ENTRY(secondary_startup_64)
20143 movl $MSR_EFER, %ecx
20144 rdmsr
20145 btsl $_EFER_SCE, %eax /* Enable System Call */
20146- btl $20,%edi /* No Execute supported? */
20147+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
20148 jnc 1f
20149 btsl $_EFER_NX, %eax
20150+ leaq init_level4_pgt(%rip), %rdi
20151+#ifndef CONFIG_EFI
20152+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
20153+#endif
20154+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
20155+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
20156+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
20157+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
20158 1: wrmsr /* Make changes effective */
20159
20160 /* Setup cr0 */
20161@@ -246,6 +250,7 @@ ENTRY(secondary_startup_64)
20162 * jump. In addition we need to ensure %cs is set so we make this
20163 * a far return.
20164 */
20165+ pax_set_fptr_mask
20166 movq initial_code(%rip),%rax
20167 pushq $0 # fake return address to stop unwinder
20168 pushq $__KERNEL_CS # set correct cs
20169@@ -284,7 +289,7 @@ ENDPROC(start_cpu0)
20170 bad_address:
20171 jmp bad_address
20172
20173- .section ".init.text","ax"
20174+ __INIT
20175 .globl early_idt_handlers
20176 early_idt_handlers:
20177 # 104(%rsp) %rflags
20178@@ -343,7 +348,7 @@ ENTRY(early_idt_handler)
20179 call dump_stack
20180 #ifdef CONFIG_KALLSYMS
20181 leaq early_idt_ripmsg(%rip),%rdi
20182- movq 40(%rsp),%rsi # %rip again
20183+ movq 88(%rsp),%rsi # %rip again
20184 call __print_symbol
20185 #endif
20186 #endif /* EARLY_PRINTK */
20187@@ -363,11 +368,15 @@ ENTRY(early_idt_handler)
20188 addq $16,%rsp # drop vector number and error code
20189 decl early_recursion_flag(%rip)
20190 INTERRUPT_RETURN
20191+ .previous
20192
20193+ __INITDATA
20194 .balign 4
20195 early_recursion_flag:
20196 .long 0
20197+ .previous
20198
20199+ .section .rodata,"a",@progbits
20200 #ifdef CONFIG_EARLY_PRINTK
20201 early_idt_msg:
20202 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
20203@@ -376,6 +385,7 @@ early_idt_ripmsg:
20204 #endif /* CONFIG_EARLY_PRINTK */
20205 .previous
20206
20207+ .section .rodata,"a",@progbits
20208 #define NEXT_PAGE(name) \
20209 .balign PAGE_SIZE; \
20210 ENTRY(name)
20211@@ -388,7 +398,6 @@ ENTRY(name)
20212 i = i + 1 ; \
20213 .endr
20214
20215- .data
20216 /*
20217 * This default setting generates an ident mapping at address 0x100000
20218 * and a mapping for the kernel that precisely maps virtual address
20219@@ -399,13 +408,41 @@ NEXT_PAGE(init_level4_pgt)
20220 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20221 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
20222 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20223+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
20224+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
20225+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
20226+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
20227+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
20228+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20229 .org init_level4_pgt + L4_START_KERNEL*8, 0
20230 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
20231 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
20232
20233+#ifdef CONFIG_PAX_PER_CPU_PGD
20234+NEXT_PAGE(cpu_pgd)
20235+ .rept NR_CPUS
20236+ .fill 512,8,0
20237+ .endr
20238+#endif
20239+
20240 NEXT_PAGE(level3_ident_pgt)
20241 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20242+#ifdef CONFIG_XEN
20243 .fill 511,8,0
20244+#else
20245+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
20246+ .fill 510,8,0
20247+#endif
20248+
20249+NEXT_PAGE(level3_vmalloc_start_pgt)
20250+ .fill 512,8,0
20251+
20252+NEXT_PAGE(level3_vmalloc_end_pgt)
20253+ .fill 512,8,0
20254+
20255+NEXT_PAGE(level3_vmemmap_pgt)
20256+ .fill L3_VMEMMAP_START,8,0
20257+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20258
20259 NEXT_PAGE(level3_kernel_pgt)
20260 .fill L3_START_KERNEL,8,0
20261@@ -413,20 +450,23 @@ NEXT_PAGE(level3_kernel_pgt)
20262 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
20263 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20264
20265+NEXT_PAGE(level2_vmemmap_pgt)
20266+ .fill 512,8,0
20267+
20268 NEXT_PAGE(level2_fixmap_pgt)
20269- .fill 506,8,0
20270- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20271- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
20272- .fill 5,8,0
20273+ .fill 507,8,0
20274+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
20275+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
20276+ .fill 4,8,0
20277
20278-NEXT_PAGE(level1_fixmap_pgt)
20279+NEXT_PAGE(level1_vsyscall_pgt)
20280 .fill 512,8,0
20281
20282-NEXT_PAGE(level2_ident_pgt)
20283- /* Since I easily can, map the first 1G.
20284+ /* Since I easily can, map the first 2G.
20285 * Don't set NX because code runs from these pages.
20286 */
20287- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
20288+NEXT_PAGE(level2_ident_pgt)
20289+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
20290
20291 NEXT_PAGE(level2_kernel_pgt)
20292 /*
20293@@ -439,37 +479,59 @@ NEXT_PAGE(level2_kernel_pgt)
20294 * If you want to increase this then increase MODULES_VADDR
20295 * too.)
20296 */
20297- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
20298- KERNEL_IMAGE_SIZE/PMD_SIZE)
20299-
20300-NEXT_PAGE(level2_spare_pgt)
20301- .fill 512, 8, 0
20302+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
20303
20304 #undef PMDS
20305 #undef NEXT_PAGE
20306
20307- .data
20308+ .align PAGE_SIZE
20309+ENTRY(cpu_gdt_table)
20310+ .rept NR_CPUS
20311+ .quad 0x0000000000000000 /* NULL descriptor */
20312+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
20313+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
20314+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
20315+ .quad 0x00cffb000000ffff /* __USER32_CS */
20316+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
20317+ .quad 0x00affb000000ffff /* __USER_CS */
20318+
20319+#ifdef CONFIG_PAX_KERNEXEC
20320+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
20321+#else
20322+ .quad 0x0 /* unused */
20323+#endif
20324+
20325+ .quad 0,0 /* TSS */
20326+ .quad 0,0 /* LDT */
20327+ .quad 0,0,0 /* three TLS descriptors */
20328+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
20329+ /* asm/segment.h:GDT_ENTRIES must match this */
20330+
20331+ /* zero the remaining page */
20332+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
20333+ .endr
20334+
20335 .align 16
20336 .globl early_gdt_descr
20337 early_gdt_descr:
20338 .word GDT_ENTRIES*8-1
20339 early_gdt_descr_base:
20340- .quad INIT_PER_CPU_VAR(gdt_page)
20341+ .quad cpu_gdt_table
20342
20343 ENTRY(phys_base)
20344 /* This must match the first entry in level2_kernel_pgt */
20345 .quad 0x0000000000000000
20346
20347 #include "../../x86/xen/xen-head.S"
20348-
20349- .section .bss, "aw", @nobits
20350+
20351+ .section .rodata,"a",@progbits
20352 .align L1_CACHE_BYTES
20353 ENTRY(idt_table)
20354- .skip IDT_ENTRIES * 16
20355+ .fill 512,8,0
20356
20357 .align L1_CACHE_BYTES
20358 ENTRY(nmi_idt_table)
20359- .skip IDT_ENTRIES * 16
20360+ .fill 512,8,0
20361
20362 __PAGE_ALIGNED_BSS
20363 .align PAGE_SIZE
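The head_64.S hunks above do three related things: they pre-populate dedicated PGD slots for the vmalloc, vmalloc-end and vmemmap regions, they set _PAGE_BIT_NX (bit 63) on every top-level entry that must never hold code once EFER.NX is enabled, and they move the GDT and IDT out of writable sections. A minimal C sketch of the same bit manipulation, assuming the stock x86 pgd_t layout and _PAGE_NX definition:

	#include <asm/pgtable_types.h>

	/* Illustrative only: the C-level equivalent of the
	 * btsq $_PAGE_BIT_NX instructions above. Only valid after the
	 * rdmsr/btsl sequence has confirmed NX support in EFER. */
	static inline void pgd_set_nx(pgd_t *pgdp)
	{
		pgdp->pgd |= _PAGE_NX;	/* bit 63 of the entry */
	}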
20364diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
20365index 9c3bd4a..e1d9b35 100644
20366--- a/arch/x86/kernel/i386_ksyms_32.c
20367+++ b/arch/x86/kernel/i386_ksyms_32.c
20368@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
20369 EXPORT_SYMBOL(cmpxchg8b_emu);
20370 #endif
20371
20372+EXPORT_SYMBOL_GPL(cpu_gdt_table);
20373+
20374 /* Networking helper routines. */
20375 EXPORT_SYMBOL(csum_partial_copy_generic);
20376+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
20377+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
20378
20379 EXPORT_SYMBOL(__get_user_1);
20380 EXPORT_SYMBOL(__get_user_2);
20381@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
20382
20383 EXPORT_SYMBOL(csum_partial);
20384 EXPORT_SYMBOL(empty_zero_page);
20385+
20386+#ifdef CONFIG_PAX_KERNEXEC
20387+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
20388+#endif
20389diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
20390index 245a71d..89d9ce4 100644
20391--- a/arch/x86/kernel/i387.c
20392+++ b/arch/x86/kernel/i387.c
20393@@ -55,7 +55,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
20394 static inline bool interrupted_user_mode(void)
20395 {
20396 struct pt_regs *regs = get_irq_regs();
20397- return regs && user_mode_vm(regs);
20398+ return regs && user_mode(regs);
20399 }
20400
20401 /*
20402diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
20403index 9a5c460..b332a4b 100644
20404--- a/arch/x86/kernel/i8259.c
20405+++ b/arch/x86/kernel/i8259.c
20406@@ -209,7 +209,7 @@ spurious_8259A_irq:
20407 "spurious 8259A interrupt: IRQ%d.\n", irq);
20408 spurious_irq_mask |= irqmask;
20409 }
20410- atomic_inc(&irq_err_count);
20411+ atomic_inc_unchecked(&irq_err_count);
20412 /*
20413 * Theoretically we do not have to handle this IRQ,
20414 * but in Linux this does not cause problems and is
20415@@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
20416 /* (slave's support for AEOI in flat mode is to be investigated) */
20417 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
20418
20419+ pax_open_kernel();
20420 if (auto_eoi)
20421 /*
20422 * In AEOI mode we just have to mask the interrupt
20423 * when acking.
20424 */
20425- i8259A_chip.irq_mask_ack = disable_8259A_irq;
20426+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
20427 else
20428- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
20429+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
20430+ pax_close_kernel();
20431
20432 udelay(100); /* wait for 8259A to initialize */
20433
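The pax_open_kernel()/pax_close_kernel() pair seen here (and in many hunks below) brackets writes to structures that the rest of the patch moves into read-only memory, such as the constified irq_chip; the *(void **)& cast is what lets the assignment compile against a const-qualified member. A sketch of the native helpers, under the assumption (consistent with their use throughout this patch) that they simply toggle CR0.WP with preemption disabled; includes from linux/preempt.h and asm/special_insns.h are assumed:

	/* Hedged sketch, not necessarily the exact PaX implementation. */
	static inline unsigned long native_pax_open_kernel(void)
	{
		unsigned long cr0;

		preempt_disable();
		barrier();
		cr0 = read_cr0() ^ X86_CR0_WP;
		BUG_ON(cr0 & X86_CR0_WP);	/* WP must now be clear */
		write_cr0(cr0);
		return cr0 ^ X86_CR0_WP;
	}

	static inline unsigned long native_pax_close_kernel(void)
	{
		unsigned long cr0;

		cr0 = read_cr0() ^ X86_CR0_WP;
		BUG_ON(!(cr0 & X86_CR0_WP));	/* WP must be set again */
		write_cr0(cr0);
		barrier();
		preempt_enable_no_resched();
		return cr0 ^ X86_CR0_WP;
	}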
20434diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
20435index a979b5b..1d6db75 100644
20436--- a/arch/x86/kernel/io_delay.c
20437+++ b/arch/x86/kernel/io_delay.c
20438@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
20439 * Quirk table for systems that misbehave (lock up, etc.) if port
20440 * 0x80 is used:
20441 */
20442-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
20443+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
20444 {
20445 .callback = dmi_io_delay_0xed_port,
20446 .ident = "Compaq Presario V6000",
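The io_delay.c change pairs const with __initconst rather than __initdata. The rule being applied: once a DMI table is const-qualified it has to live in a const init section, otherwise the compiler can fail the build with a section type conflict. The pattern, with a hypothetical table name:

	#include <linux/dmi.h>

	/* Hypothetical example of the annotation pairing used above. */
	static const struct dmi_system_id __initconst example_dmi_table[] = {
		{
			.ident = "Example Machine",
		},
		{ }	/* terminating entry */
	};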
20447diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
20448index 8c96897..be66bfa 100644
20449--- a/arch/x86/kernel/ioport.c
20450+++ b/arch/x86/kernel/ioport.c
20451@@ -6,6 +6,7 @@
20452 #include <linux/sched.h>
20453 #include <linux/kernel.h>
20454 #include <linux/capability.h>
20455+#include <linux/security.h>
20456 #include <linux/errno.h>
20457 #include <linux/types.h>
20458 #include <linux/ioport.h>
20459@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
20460
20461 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
20462 return -EINVAL;
20463+#ifdef CONFIG_GRKERNSEC_IO
20464+ if (turn_on && grsec_disable_privio) {
20465+ gr_handle_ioperm();
20466+ return -EPERM;
20467+ }
20468+#endif
20469 if (turn_on && !capable(CAP_SYS_RAWIO))
20470 return -EPERM;
20471
20472@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
20473 * because the ->io_bitmap_max value must match the bitmap
20474 * contents:
20475 */
20476- tss = &per_cpu(init_tss, get_cpu());
20477+ tss = init_tss + get_cpu();
20478
20479 if (turn_on)
20480 bitmap_clear(t->io_bitmap_ptr, from, num);
20481@@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct pt_regs *regs)
20482 return -EINVAL;
20483 /* Trying to gain more privileges? */
20484 if (level > old) {
20485+#ifdef CONFIG_GRKERNSEC_IO
20486+ if (grsec_disable_privio) {
20487+ gr_handle_iopl();
20488+ return -EPERM;
20489+ }
20490+#endif
20491 if (!capable(CAP_SYS_RAWIO))
20492 return -EPERM;
20493 }
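Under CONFIG_GRKERNSEC_IO, both sys_ioperm() and sys_iopl() now fail with -EPERM before the usual capability check whenever the grsec_disable_privio knob is set, and a gr_handle_* hook records the attempt. The handlers themselves are defined elsewhere in the patch; a purely illustrative stand-in showing the expected shape:

	#include <linux/kernel.h>
	#include <linux/sched.h>

	/* Hypothetical body -- the real gr_handle_ioperm() lives in the
	 * grsecurity core, not in this hunk. */
	void gr_handle_ioperm(void)
	{
		printk(KERN_ALERT "grsec: denied ioperm() by %.16s[%d]\n",
		       current->comm, task_pid_nr(current));
	}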
20494diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
20495index e4595f1..ee3bfb8 100644
20496--- a/arch/x86/kernel/irq.c
20497+++ b/arch/x86/kernel/irq.c
20498@@ -18,7 +18,7 @@
20499 #include <asm/mce.h>
20500 #include <asm/hw_irq.h>
20501
20502-atomic_t irq_err_count;
20503+atomic_unchecked_t irq_err_count;
20504
20505 /* Function pointer for generic interrupt vector handling */
20506 void (*x86_platform_ipi_callback)(void) = NULL;
20507@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
20508 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
20509 seq_printf(p, " Machine check polls\n");
20510 #endif
20511- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
20512+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
20513 #if defined(CONFIG_X86_IO_APIC)
20514- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
20515+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
20516 #endif
20517 return 0;
20518 }
20519@@ -164,10 +164,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
20520
20521 u64 arch_irq_stat(void)
20522 {
20523- u64 sum = atomic_read(&irq_err_count);
20524+ u64 sum = atomic_read_unchecked(&irq_err_count);
20525
20526 #ifdef CONFIG_X86_IO_APIC
20527- sum += atomic_read(&irq_mis_count);
20528+ sum += atomic_read_unchecked(&irq_mis_count);
20529 #endif
20530 return sum;
20531 }
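irq_err_count and irq_mis_count are plain statistics, so the patch converts them to atomic_unchecked_t. The distinction matters under PAX_REFCOUNT, where ordinary atomic_t arithmetic is instrumented to trap on overflow (to stop reference-count overflow exploits); the _unchecked variants opt pure counters out of that instrumentation. A sketch of the opt-out type under that assumption:

	/* Field name and asm mirror the checked atomic_t; illustrative. */
	typedef struct {
		int counter;
	} atomic_unchecked_t;

	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		/* no overflow check ("jo" + trap) is inserted here */
		asm volatile("lock; incl %0" : "+m" (v->counter));
	}

	static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
	{
		return (*(volatile const int *)&v->counter);
	}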
20532diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
20533index 344faf8..355f60d 100644
20534--- a/arch/x86/kernel/irq_32.c
20535+++ b/arch/x86/kernel/irq_32.c
20536@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
20537 __asm__ __volatile__("andl %%esp,%0" :
20538 "=r" (sp) : "0" (THREAD_SIZE - 1));
20539
20540- return sp < (sizeof(struct thread_info) + STACK_WARN);
20541+ return sp < STACK_WARN;
20542 }
20543
20544 static void print_stack_overflow(void)
20545@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
20546 * per-CPU IRQ handling contexts (thread information and stack)
20547 */
20548 union irq_ctx {
20549- struct thread_info tinfo;
20550- u32 stack[THREAD_SIZE/sizeof(u32)];
20551+ unsigned long previous_esp;
20552+ u32 stack[THREAD_SIZE/sizeof(u32)];
20553 } __attribute__((aligned(THREAD_SIZE)));
20554
20555 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
20556@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
20557 static inline int
20558 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20559 {
20560- union irq_ctx *curctx, *irqctx;
20561+ union irq_ctx *irqctx;
20562 u32 *isp, arg1, arg2;
20563
20564- curctx = (union irq_ctx *) current_thread_info();
20565 irqctx = __this_cpu_read(hardirq_ctx);
20566
20567 /*
20568@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20569 * handler) we can't do that and just have to keep using the
20570 * current stack (which is the irq stack already after all)
20571 */
20572- if (unlikely(curctx == irqctx))
20573+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
20574 return 0;
20575
20576 /* build the stack frame on the IRQ stack */
20577- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
20578- irqctx->tinfo.task = curctx->tinfo.task;
20579- irqctx->tinfo.previous_esp = current_stack_pointer;
20580+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
20581+ irqctx->previous_esp = current_stack_pointer;
20582
20583- /* Copy the preempt_count so that the [soft]irq checks work. */
20584- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
20585+#ifdef CONFIG_PAX_MEMORY_UDEREF
20586+ __set_fs(MAKE_MM_SEG(0));
20587+#endif
20588
20589 if (unlikely(overflow))
20590 call_on_stack(print_stack_overflow, isp);
20591@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20592 : "0" (irq), "1" (desc), "2" (isp),
20593 "D" (desc->handle_irq)
20594 : "memory", "cc", "ecx");
20595+
20596+#ifdef CONFIG_PAX_MEMORY_UDEREF
20597+ __set_fs(current_thread_info()->addr_limit);
20598+#endif
20599+
20600 return 1;
20601 }
20602
20603@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
20604 */
20605 void __cpuinit irq_ctx_init(int cpu)
20606 {
20607- union irq_ctx *irqctx;
20608-
20609 if (per_cpu(hardirq_ctx, cpu))
20610 return;
20611
20612- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
20613- THREADINFO_GFP,
20614- THREAD_SIZE_ORDER));
20615- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
20616- irqctx->tinfo.cpu = cpu;
20617- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
20618- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
20619-
20620- per_cpu(hardirq_ctx, cpu) = irqctx;
20621-
20622- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
20623- THREADINFO_GFP,
20624- THREAD_SIZE_ORDER));
20625- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
20626- irqctx->tinfo.cpu = cpu;
20627- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
20628-
20629- per_cpu(softirq_ctx, cpu) = irqctx;
20630+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
20631+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
20632+
20633+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
20634+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
20635
20636 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
20637 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
20638@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
20639 asmlinkage void do_softirq(void)
20640 {
20641 unsigned long flags;
20642- struct thread_info *curctx;
20643 union irq_ctx *irqctx;
20644 u32 *isp;
20645
20646@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
20647 local_irq_save(flags);
20648
20649 if (local_softirq_pending()) {
20650- curctx = current_thread_info();
20651 irqctx = __this_cpu_read(softirq_ctx);
20652- irqctx->tinfo.task = curctx->task;
20653- irqctx->tinfo.previous_esp = current_stack_pointer;
20654+ irqctx->previous_esp = current_stack_pointer;
20655
20656 /* build the stack frame on the softirq stack */
20657- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
20658+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
20659+
20660+#ifdef CONFIG_PAX_MEMORY_UDEREF
20661+ __set_fs(MAKE_MM_SEG(0));
20662+#endif
20663
20664 call_on_stack(__do_softirq, isp);
20665+
20666+#ifdef CONFIG_PAX_MEMORY_UDEREF
20667+ __set_fs(current_thread_info()->addr_limit);
20668+#endif
20669+
20670 /*
20671 * Shouldn't happen, we returned above if in_interrupt():
20672 */
20673@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
20674 if (unlikely(!desc))
20675 return false;
20676
20677- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
20678+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
20679 if (unlikely(overflow))
20680 print_stack_overflow();
20681 desc->handle_irq(irq, desc);
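Two details of the irq_32.c rework are easy to miss. First, union irq_ctx no longer embeds a struct thread_info; only the interrupted context's stack pointer is kept at the base of the IRQ stack, which is why check_stack_overflow() drops the sizeof(struct thread_info) slack. Second, under PAX_MEMORY_UDEREF the IRQ and softirq handlers run with a zeroed address limit and the task's addr_limit is restored afterwards. A worked version of the new overflow test, assuming the usual THREAD_SIZE and STACK_WARN definitions (8 KiB and THREAD_SIZE/8):

	/* sp is reduced to its offset inside the current stack; with no
	 * thread_info at the stack bottom, anything below STACK_WARN
	 * (1 KiB of headroom) counts as imminent overflow. */
	static int check_stack_headroom(unsigned long sp)
	{
		sp &= THREAD_SIZE - 1;
		return sp < STACK_WARN;
	}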
20682diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
20683index d04d3ec..ea4b374 100644
20684--- a/arch/x86/kernel/irq_64.c
20685+++ b/arch/x86/kernel/irq_64.c
20686@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
20687 u64 estack_top, estack_bottom;
20688 u64 curbase = (u64)task_stack_page(current);
20689
20690- if (user_mode_vm(regs))
20691+ if (user_mode(regs))
20692 return;
20693
20694 if (regs->sp >= curbase + sizeof(struct thread_info) +
20695diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
20696index dc1404b..bbc43e7 100644
20697--- a/arch/x86/kernel/kdebugfs.c
20698+++ b/arch/x86/kernel/kdebugfs.c
20699@@ -27,7 +27,7 @@ struct setup_data_node {
20700 u32 len;
20701 };
20702
20703-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
20704+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
20705 size_t count, loff_t *ppos)
20706 {
20707 struct setup_data_node *node = file->private_data;
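__size_overflow(3) marks the third parameter of setup_data_read() for the size_overflow GCC plugin shipped with this patch: expressions that flow into that size argument get recompiled with integer-overflow checks. Assumed definition, consistent with how the annotation is used here:

	/* Expands to nothing unless the size_overflow plugin is active. */
	#ifdef SIZE_OVERFLOW_PLUGIN
	#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
	#else
	#define __size_overflow(...)
	#endif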
20708diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
20709index 836f832..a8bda67 100644
20710--- a/arch/x86/kernel/kgdb.c
20711+++ b/arch/x86/kernel/kgdb.c
20712@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
20713 #ifdef CONFIG_X86_32
20714 switch (regno) {
20715 case GDB_SS:
20716- if (!user_mode_vm(regs))
20717+ if (!user_mode(regs))
20718 *(unsigned long *)mem = __KERNEL_DS;
20719 break;
20720 case GDB_SP:
20721- if (!user_mode_vm(regs))
20722+ if (!user_mode(regs))
20723 *(unsigned long *)mem = kernel_stack_pointer(regs);
20724 break;
20725 case GDB_GS:
20726@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
20727 bp->attr.bp_addr = breakinfo[breakno].addr;
20728 bp->attr.bp_len = breakinfo[breakno].len;
20729 bp->attr.bp_type = breakinfo[breakno].type;
20730- info->address = breakinfo[breakno].addr;
20731+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
20732+ info->address = ktla_ktva(breakinfo[breakno].addr);
20733+ else
20734+ info->address = breakinfo[breakno].addr;
20735 info->len = breakinfo[breakno].len;
20736 info->type = breakinfo[breakno].type;
20737 val = arch_install_hw_breakpoint(bp);
20738@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
20739 case 'k':
20740 /* clear the trace bit */
20741 linux_regs->flags &= ~X86_EFLAGS_TF;
20742- atomic_set(&kgdb_cpu_doing_single_step, -1);
20743+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
20744
20745 /* set the trace bit if we're stepping */
20746 if (remcomInBuffer[0] == 's') {
20747 linux_regs->flags |= X86_EFLAGS_TF;
20748- atomic_set(&kgdb_cpu_doing_single_step,
20749+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
20750 raw_smp_processor_id());
20751 }
20752
20753@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
20754
20755 switch (cmd) {
20756 case DIE_DEBUG:
20757- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
20758+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
20759 if (user_mode(regs))
20760 return single_step_cont(regs, args);
20761 break;
20762@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
20763 #endif /* CONFIG_DEBUG_RODATA */
20764
20765 bpt->type = BP_BREAKPOINT;
20766- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
20767+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
20768 BREAK_INSTR_SIZE);
20769 if (err)
20770 return err;
20771- err = probe_kernel_write((char *)bpt->bpt_addr,
20772+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
20773 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
20774 #ifdef CONFIG_DEBUG_RODATA
20775 if (!err)
20776@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
20777 return -EBUSY;
20778 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
20779 BREAK_INSTR_SIZE);
20780- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
20781+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
20782 if (err)
20783 return err;
20784 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
20785@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
20786 if (mutex_is_locked(&text_mutex))
20787 goto knl_write;
20788 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
20789- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
20790+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
20791 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
20792 goto knl_write;
20793 return err;
20794 knl_write:
20795 #endif /* CONFIG_DEBUG_RODATA */
20796- return probe_kernel_write((char *)bpt->bpt_addr,
20797+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
20798 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
20799 }
20800
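The kgdb.c hunks (and the kprobes hunks below) route every read or write of kernel text through ktla_ktva()/ktva_ktla(). Under PAX_KERNEXEC on i386 the kernel image is reachable at two addresses, the alias it executes from and the original linear mapping, and these helpers convert between them; without KERNEXEC they are identity macros. A sketch matching that model, with LOAD_PHYSICAL_ADDR and PAGE_OFFSET as in the stock headers (treat the exact arithmetic as an assumption; the authoritative definitions sit in this patch's pgtable.h hunks):

	#ifdef CONFIG_PAX_KERNEXEC
	#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
	#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
	#else
	#define ktla_ktva(addr) (addr)
	#define ktva_ktla(addr) (addr)
	#endif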
20801diff --git a/arch/x86/kernel/kprobes-opt.c b/arch/x86/kernel/kprobes-opt.c
20802index c5e410e..ed5a7f0 100644
20803--- a/arch/x86/kernel/kprobes-opt.c
20804+++ b/arch/x86/kernel/kprobes-opt.c
20805@@ -338,7 +338,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
20806 * Verify if the address gap is in 2GB range, because this uses
20807 * a relative jump.
20808 */
20809- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
20810+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
20811 if (abs(rel) > 0x7fffffff)
20812 return -ERANGE;
20813
20814@@ -353,16 +353,16 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
20815 op->optinsn.size = ret;
20816
20817 /* Copy arch-dep-instance from template */
20818- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
20819+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
20820
20821 /* Set probe information */
20822 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
20823
20824 /* Set probe function call */
20825- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
20826+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
20827
20828 /* Set returning jmp instruction at the tail of out-of-line buffer */
20829- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
20830+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
20831 (u8 *)op->kp.addr + op->optinsn.size);
20832
20833 flush_icache_range((unsigned long) buf,
20834@@ -385,7 +385,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
20835 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
20836
20837 /* Backup instructions which will be replaced by jump address */
20838- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
20839+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
20840 RELATIVE_ADDR_SIZE);
20841
20842 insn_buf[0] = RELATIVEJUMP_OPCODE;
20843@@ -483,7 +483,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
20844 /* This kprobe is really able to run optimized path. */
20845 op = container_of(p, struct optimized_kprobe, kp);
20846 /* Detour through copied instructions */
20847- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
20848+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
20849 if (!reenter)
20850 reset_current_kprobe();
20851 preempt_enable_no_resched();
20852diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
20853index 57916c0..9e0b9d0 100644
20854--- a/arch/x86/kernel/kprobes.c
20855+++ b/arch/x86/kernel/kprobes.c
20856@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
20857 s32 raddr;
20858 } __attribute__((packed)) *insn;
20859
20860- insn = (struct __arch_relative_insn *)from;
20861+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
20862+
20863+ pax_open_kernel();
20864 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
20865 insn->op = op;
20866+ pax_close_kernel();
20867 }
20868
20869 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
20870@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
20871 kprobe_opcode_t opcode;
20872 kprobe_opcode_t *orig_opcodes = opcodes;
20873
20874- if (search_exception_tables((unsigned long)opcodes))
20875+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
20876 return 0; /* Page fault may occur on this address. */
20877
20878 retry:
20879@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
20880 * for the first byte, we can recover the original instruction
20881 * from it and kp->opcode.
20882 */
20883- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
20884+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
20885 buf[0] = kp->opcode;
20886- return (unsigned long)buf;
20887+ return ktva_ktla((unsigned long)buf);
20888 }
20889
20890 /*
20891@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
20892 /* Another subsystem puts a breakpoint, failed to recover */
20893 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
20894 return 0;
20895+ pax_open_kernel();
20896 memcpy(dest, insn.kaddr, insn.length);
20897+ pax_close_kernel();
20898
20899 #ifdef CONFIG_X86_64
20900 if (insn_rip_relative(&insn)) {
20901@@ -355,7 +360,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
20902 newdisp = (u8 *) src + (s64) insn.displacement.value - (u8 *) dest;
20903 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
20904 disp = (u8 *) dest + insn_offset_displacement(&insn);
20905+ pax_open_kernel();
20906 *(s32 *) disp = (s32) newdisp;
20907+ pax_close_kernel();
20908 }
20909 #endif
20910 return insn.length;
20911@@ -485,7 +492,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
20912 * nor set current_kprobe, because it doesn't use single
20913 * stepping.
20914 */
20915- regs->ip = (unsigned long)p->ainsn.insn;
20916+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
20917 preempt_enable_no_resched();
20918 return;
20919 }
20920@@ -502,9 +509,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
20921 regs->flags &= ~X86_EFLAGS_IF;
20922 /* single step inline if the instruction is an int3 */
20923 if (p->opcode == BREAKPOINT_INSTRUCTION)
20924- regs->ip = (unsigned long)p->addr;
20925+ regs->ip = ktla_ktva((unsigned long)p->addr);
20926 else
20927- regs->ip = (unsigned long)p->ainsn.insn;
20928+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
20929 }
20930
20931 /*
20932@@ -600,7 +607,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
20933 setup_singlestep(p, regs, kcb, 0);
20934 return 1;
20935 }
20936- } else if (*addr != BREAKPOINT_INSTRUCTION) {
20937+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
20938 /*
20939 * The breakpoint instruction was removed right
20940 * after we hit it. Another cpu has removed
20941@@ -651,6 +658,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
20942 " movq %rax, 152(%rsp)\n"
20943 RESTORE_REGS_STRING
20944 " popfq\n"
20945+#ifdef KERNEXEC_PLUGIN
20946+ " btsq $63,(%rsp)\n"
20947+#endif
20948 #else
20949 " pushf\n"
20950 SAVE_REGS_STRING
20951@@ -788,7 +798,7 @@ static void __kprobes
20952 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
20953 {
20954 unsigned long *tos = stack_addr(regs);
20955- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
20956+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
20957 unsigned long orig_ip = (unsigned long)p->addr;
20958 kprobe_opcode_t *insn = p->ainsn.insn;
20959
20960@@ -970,7 +980,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
20961 struct die_args *args = data;
20962 int ret = NOTIFY_DONE;
20963
20964- if (args->regs && user_mode_vm(args->regs))
20965+ if (args->regs && user_mode(args->regs))
20966 return ret;
20967
20968 switch (val) {
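The btsq $63,(%rsp) added to the kretprobe trampoline retags the return address just before the final ret. The KERNEXEC GCC plugin referenced by KERNEXEC_PLUGIN is understood to keep the top address bit set on legitimate kernel control-flow targets, so a target overwritten with a userland address becomes non-canonical and faults instead of executing. Conceptually:

	/* Illustrative: how a return target is retagged under the plugin. */
	static inline unsigned long kernexec_mask_fptr(unsigned long fptr)
	{
		return fptr | (1UL << 63);	/* same effect as btsq $63 */
	}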
20969diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
20970index 9c2bd8b..bb1131c 100644
20971--- a/arch/x86/kernel/kvm.c
20972+++ b/arch/x86/kernel/kvm.c
20973@@ -452,7 +452,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
20974 return NOTIFY_OK;
20975 }
20976
20977-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
20978+static struct notifier_block kvm_cpu_notifier = {
20979 .notifier_call = kvm_cpu_notify,
20980 };
20981 #endif
20982diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
20983index ebc9873..1b9724b 100644
20984--- a/arch/x86/kernel/ldt.c
20985+++ b/arch/x86/kernel/ldt.c
20986@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
20987 if (reload) {
20988 #ifdef CONFIG_SMP
20989 preempt_disable();
20990- load_LDT(pc);
20991+ load_LDT_nolock(pc);
20992 if (!cpumask_equal(mm_cpumask(current->mm),
20993 cpumask_of(smp_processor_id())))
20994 smp_call_function(flush_ldt, current->mm, 1);
20995 preempt_enable();
20996 #else
20997- load_LDT(pc);
20998+ load_LDT_nolock(pc);
20999 #endif
21000 }
21001 if (oldsize) {
21002@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
21003 return err;
21004
21005 for (i = 0; i < old->size; i++)
21006- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
21007+ write_ldt_entry(new->ldt, i, old->ldt + i);
21008 return 0;
21009 }
21010
21011@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
21012 retval = copy_ldt(&mm->context, &old_mm->context);
21013 mutex_unlock(&old_mm->context.lock);
21014 }
21015+
21016+ if (tsk == current) {
21017+ mm->context.vdso = 0;
21018+
21019+#ifdef CONFIG_X86_32
21020+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21021+ mm->context.user_cs_base = 0UL;
21022+ mm->context.user_cs_limit = ~0UL;
21023+
21024+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
21025+ cpus_clear(mm->context.cpu_user_cs_mask);
21026+#endif
21027+
21028+#endif
21029+#endif
21030+
21031+ }
21032+
21033 return retval;
21034 }
21035
21036@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
21037 }
21038 }
21039
21040+#ifdef CONFIG_PAX_SEGMEXEC
21041+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
21042+ error = -EINVAL;
21043+ goto out_unlock;
21044+ }
21045+#endif
21046+
21047 fill_ldt(&ldt, &ldt_info);
21048 if (oldmode)
21049 ldt.avl = 0;
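The ldt.c hunk also hardens write_ldt(): under PAX_SEGMEXEC a task may not install executable LDT descriptors, because SEGMEXEC emulates non-executable pages with code-segment limits and an arbitrary code descriptor would sidestep that. Restated as a predicate, using the pax_flags/MF_PAX_SEGMEXEC members this patch adds to mm_struct:

	#include <linux/types.h>
	#include <asm/ldt.h>	/* MODIFY_LDT_CONTENTS_CODE, user_desc */

	static bool segmexec_forbids_ldt(const struct mm_struct *mm,
					 const struct user_desc *info)
	{
		return (mm->pax_flags & MF_PAX_SEGMEXEC) &&
		       (info->contents & MODIFY_LDT_CONTENTS_CODE);
	}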
21050diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
21051index 5b19e4d..6476a76 100644
21052--- a/arch/x86/kernel/machine_kexec_32.c
21053+++ b/arch/x86/kernel/machine_kexec_32.c
21054@@ -26,7 +26,7 @@
21055 #include <asm/cacheflush.h>
21056 #include <asm/debugreg.h>
21057
21058-static void set_idt(void *newidt, __u16 limit)
21059+static void set_idt(struct desc_struct *newidt, __u16 limit)
21060 {
21061 struct desc_ptr curidt;
21062
21063@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
21064 }
21065
21066
21067-static void set_gdt(void *newgdt, __u16 limit)
21068+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
21069 {
21070 struct desc_ptr curgdt;
21071
21072@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
21073 }
21074
21075 control_page = page_address(image->control_code_page);
21076- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
21077+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
21078
21079 relocate_kernel_ptr = control_page;
21080 page_list[PA_CONTROL_PAGE] = __pa(control_page);
21081diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
21082index 3a04b22..1d2eb09 100644
21083--- a/arch/x86/kernel/microcode_core.c
21084+++ b/arch/x86/kernel/microcode_core.c
21085@@ -512,7 +512,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21086 return NOTIFY_OK;
21087 }
21088
21089-static struct notifier_block __refdata mc_cpu_notifier = {
21090+static struct notifier_block mc_cpu_notifier = {
21091 .notifier_call = mc_cpu_callback,
21092 };
21093
21094diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
21095index 3544aed..01ddc1c 100644
21096--- a/arch/x86/kernel/microcode_intel.c
21097+++ b/arch/x86/kernel/microcode_intel.c
21098@@ -431,13 +431,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21099
21100 static int get_ucode_user(void *to, const void *from, size_t n)
21101 {
21102- return copy_from_user(to, from, n);
21103+ return copy_from_user(to, (const void __force_user *)from, n);
21104 }
21105
21106 static enum ucode_state
21107 request_microcode_user(int cpu, const void __user *buf, size_t size)
21108 {
21109- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21110+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21111 }
21112
21113 static void microcode_fini_cpu(int cpu)
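The __force_user/__force_kernel casts in microcode_intel.c document a deliberate address-space crossing: under PAX_MEMORY_UDEREF user and kernel pointers are treated as distinct sparse address spaces, and get_ucode_user() really does read a user buffer through what the generic loader types as a kernel pointer. Assumed annotations, consistent with their use here:

	#ifdef __CHECKER__
	#define __force_user	__force __user
	#define __force_kernel	__force __kernel
	#else
	#define __force_user
	#define __force_kernel
	#endif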
21114diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
21115index 216a4d7..228255a 100644
21116--- a/arch/x86/kernel/module.c
21117+++ b/arch/x86/kernel/module.c
21118@@ -43,15 +43,60 @@ do { \
21119 } while (0)
21120 #endif
21121
21122-void *module_alloc(unsigned long size)
21123+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
21124 {
21125- if (PAGE_ALIGN(size) > MODULES_LEN)
21126+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
21127 return NULL;
21128 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
21129- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
21130+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
21131 -1, __builtin_return_address(0));
21132 }
21133
21134+void *module_alloc(unsigned long size)
21135+{
21136+
21137+#ifdef CONFIG_PAX_KERNEXEC
21138+ return __module_alloc(size, PAGE_KERNEL);
21139+#else
21140+ return __module_alloc(size, PAGE_KERNEL_EXEC);
21141+#endif
21142+
21143+}
21144+
21145+#ifdef CONFIG_PAX_KERNEXEC
21146+#ifdef CONFIG_X86_32
21147+void *module_alloc_exec(unsigned long size)
21148+{
21149+ struct vm_struct *area;
21150+
21151+ if (size == 0)
21152+ return NULL;
21153+
21154+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
21155+ return area ? area->addr : NULL;
21156+}
21157+EXPORT_SYMBOL(module_alloc_exec);
21158+
21159+void module_free_exec(struct module *mod, void *module_region)
21160+{
21161+ vunmap(module_region);
21162+}
21163+EXPORT_SYMBOL(module_free_exec);
21164+#else
21165+void module_free_exec(struct module *mod, void *module_region)
21166+{
21167+ module_free(mod, module_region);
21168+}
21169+EXPORT_SYMBOL(module_free_exec);
21170+
21171+void *module_alloc_exec(unsigned long size)
21172+{
21173+ return __module_alloc(size, PAGE_KERNEL_RX);
21174+}
21175+EXPORT_SYMBOL(module_alloc_exec);
21176+#endif
21177+#endif
21178+
21179 #ifdef CONFIG_X86_32
21180 int apply_relocate(Elf32_Shdr *sechdrs,
21181 const char *strtab,
21182@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21183 unsigned int i;
21184 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
21185 Elf32_Sym *sym;
21186- uint32_t *location;
21187+ uint32_t *plocation, location;
21188
21189 DEBUGP("Applying relocate section %u to %u\n",
21190 relsec, sechdrs[relsec].sh_info);
21191 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
21192 /* This is where to make the change */
21193- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
21194- + rel[i].r_offset;
21195+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
21196+ location = (uint32_t)plocation;
21197+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
21198+ plocation = ktla_ktva((void *)plocation);
21199 /* This is the symbol it is referring to. Note that all
21200 undefined symbols have been resolved. */
21201 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
21202@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21203 switch (ELF32_R_TYPE(rel[i].r_info)) {
21204 case R_386_32:
21205 /* We add the value into the location given */
21206- *location += sym->st_value;
21207+ pax_open_kernel();
21208+ *plocation += sym->st_value;
21209+ pax_close_kernel();
21210 break;
21211 case R_386_PC32:
21212 /* Add the value, subtract its position */
21213- *location += sym->st_value - (uint32_t)location;
21214+ pax_open_kernel();
21215+ *plocation += sym->st_value - location;
21216+ pax_close_kernel();
21217 break;
21218 default:
21219 pr_err("%s: Unknown relocation: %u\n",
21220@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
21221 case R_X86_64_NONE:
21222 break;
21223 case R_X86_64_64:
21224+ pax_open_kernel();
21225 *(u64 *)loc = val;
21226+ pax_close_kernel();
21227 break;
21228 case R_X86_64_32:
21229+ pax_open_kernel();
21230 *(u32 *)loc = val;
21231+ pax_close_kernel();
21232 if (val != *(u32 *)loc)
21233 goto overflow;
21234 break;
21235 case R_X86_64_32S:
21236+ pax_open_kernel();
21237 *(s32 *)loc = val;
21238+ pax_close_kernel();
21239 if ((s64)val != *(s32 *)loc)
21240 goto overflow;
21241 break;
21242 case R_X86_64_PC32:
21243 val -= (u64)loc;
21244+ pax_open_kernel();
21245 *(u32 *)loc = val;
21246+ pax_close_kernel();
21247+
21248 #if 0
21249 if ((s64)val != *(s32 *)loc)
21250 goto overflow;
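The module.c changes split module memory by permission. module_alloc() now hands back writable, non-executable pages (KERNEXEC builds drop PAGE_KERNEL_EXEC entirely), while the new module_alloc_exec() returns executable, non-writable memory; apply_relocate*() then patches module text through pax_open_kernel() and, on i386, through the ktla_ktva() alias. The resulting policy, sketched with illustrative names and sizes:

	#include <linux/errno.h>
	#include <linux/moduleloader.h>

	/* No module mapping is ever writable and executable at once. */
	static int layout_module_regions(unsigned long core_size,
					 unsigned long text_size,
					 void **core, void **text)
	{
		*core = module_alloc(core_size);	/* RW + NX */
		*text = module_alloc_exec(text_size);	/* R-X      */
		if (!*core || !*text)
			return -ENOMEM;	/* caller unwinds partial allocs */
		return 0;
	}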
21251diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
21252index 4929502..686c291 100644
21253--- a/arch/x86/kernel/msr.c
21254+++ b/arch/x86/kernel/msr.c
21255@@ -234,7 +234,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
21256 return notifier_from_errno(err);
21257 }
21258
21259-static struct notifier_block __refdata msr_class_cpu_notifier = {
21260+static struct notifier_block msr_class_cpu_notifier = {
21261 .notifier_call = msr_class_cpu_callback,
21262 };
21263
21264diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
21265index f84f5c5..f404e81 100644
21266--- a/arch/x86/kernel/nmi.c
21267+++ b/arch/x86/kernel/nmi.c
21268@@ -105,7 +105,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
21269 return handled;
21270 }
21271
21272-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21273+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
21274 {
21275 struct nmi_desc *desc = nmi_to_desc(type);
21276 unsigned long flags;
21277@@ -129,9 +129,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21278 * event confuses some handlers (kdump uses this flag)
21279 */
21280 if (action->flags & NMI_FLAG_FIRST)
21281- list_add_rcu(&action->list, &desc->head);
21282+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
21283 else
21284- list_add_tail_rcu(&action->list, &desc->head);
21285+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
21286
21287 spin_unlock_irqrestore(&desc->lock, flags);
21288 return 0;
21289@@ -154,7 +154,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
21290 if (!strcmp(n->name, name)) {
21291 WARN(in_nmi(),
21292 "Trying to free NMI (%s) from NMI context!\n", n->name);
21293- list_del_rcu(&n->list);
21294+ pax_list_del_rcu((struct list_head *)&n->list);
21295 break;
21296 }
21297 }
21298@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
21299 dotraplinkage notrace __kprobes void
21300 do_nmi(struct pt_regs *regs, long error_code)
21301 {
21302+
21303+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21304+ if (!user_mode(regs)) {
21305+ unsigned long cs = regs->cs & 0xFFFF;
21306+ unsigned long ip = ktva_ktla(regs->ip);
21307+
21308+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
21309+ regs->ip = ip;
21310+ }
21311+#endif
21312+
21313 nmi_nesting_preprocess(regs);
21314
21315 nmi_enter();
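Because __register_nmi_handler() now takes a const struct nmiaction *, linking a handler into the descriptor list has to cast away const and mutate a node that may sit in read-only memory; that is what the pax_list_add_rcu()/pax_list_del_rcu() wrappers are for. Assumed behaviour, mirroring the open/close pattern used throughout the patch:

	/* Sketch: same semantics as list_add_rcu(), but able to write
	 * through read-only mappings. */
	void pax_list_add_rcu(struct list_head *new, struct list_head *head)
	{
		pax_open_kernel();
		list_add_rcu(new, head);
		pax_close_kernel();
	}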
21316diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
21317index 6d9582e..f746287 100644
21318--- a/arch/x86/kernel/nmi_selftest.c
21319+++ b/arch/x86/kernel/nmi_selftest.c
21320@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
21321 {
21322 /* trap all the unknown NMIs we may generate */
21323 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
21324- __initdata);
21325+ __initconst);
21326 }
21327
21328 static void __init cleanup_nmi_testsuite(void)
21329@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
21330 unsigned long timeout;
21331
21332 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
21333- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
21334+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
21335 nmi_fail = FAILURE;
21336 return;
21337 }
21338diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
21339index 676b8c7..870ba04 100644
21340--- a/arch/x86/kernel/paravirt-spinlocks.c
21341+++ b/arch/x86/kernel/paravirt-spinlocks.c
21342@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
21343 arch_spin_lock(lock);
21344 }
21345
21346-struct pv_lock_ops pv_lock_ops = {
21347+struct pv_lock_ops pv_lock_ops __read_only = {
21348 #ifdef CONFIG_SMP
21349 .spin_is_locked = __ticket_spin_is_locked,
21350 .spin_is_contended = __ticket_spin_is_contended,
21351diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
21352index 17fff18..5cfa0f4 100644
21353--- a/arch/x86/kernel/paravirt.c
21354+++ b/arch/x86/kernel/paravirt.c
21355@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
21356 {
21357 return x;
21358 }
21359+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21360+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
21361+#endif
21362
21363 void __init default_banner(void)
21364 {
21365@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
21366 if (opfunc == NULL)
21367 /* If there's no function, patch it with a ud2a (BUG) */
21368 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
21369- else if (opfunc == _paravirt_nop)
21370+ else if (opfunc == (void *)_paravirt_nop)
21371 /* If the operation is a nop, then nop the callsite */
21372 ret = paravirt_patch_nop();
21373
21374 /* identity functions just return their single argument */
21375- else if (opfunc == _paravirt_ident_32)
21376+ else if (opfunc == (void *)_paravirt_ident_32)
21377 ret = paravirt_patch_ident_32(insnbuf, len);
21378- else if (opfunc == _paravirt_ident_64)
21379+ else if (opfunc == (void *)_paravirt_ident_64)
21380 ret = paravirt_patch_ident_64(insnbuf, len);
21381+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21382+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
21383+ ret = paravirt_patch_ident_64(insnbuf, len);
21384+#endif
21385
21386 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
21387 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
21388@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
21389 if (insn_len > len || start == NULL)
21390 insn_len = len;
21391 else
21392- memcpy(insnbuf, start, insn_len);
21393+ memcpy(insnbuf, ktla_ktva(start), insn_len);
21394
21395 return insn_len;
21396 }
21397@@ -304,7 +311,7 @@ void arch_flush_lazy_mmu_mode(void)
21398 preempt_enable();
21399 }
21400
21401-struct pv_info pv_info = {
21402+struct pv_info pv_info __read_only = {
21403 .name = "bare hardware",
21404 .paravirt_enabled = 0,
21405 .kernel_rpl = 0,
21406@@ -315,16 +322,16 @@ struct pv_info pv_info = {
21407 #endif
21408 };
21409
21410-struct pv_init_ops pv_init_ops = {
21411+struct pv_init_ops pv_init_ops __read_only = {
21412 .patch = native_patch,
21413 };
21414
21415-struct pv_time_ops pv_time_ops = {
21416+struct pv_time_ops pv_time_ops __read_only = {
21417 .sched_clock = native_sched_clock,
21418 .steal_clock = native_steal_clock,
21419 };
21420
21421-struct pv_irq_ops pv_irq_ops = {
21422+struct pv_irq_ops pv_irq_ops __read_only = {
21423 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
21424 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
21425 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
21426@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
21427 #endif
21428 };
21429
21430-struct pv_cpu_ops pv_cpu_ops = {
21431+struct pv_cpu_ops pv_cpu_ops __read_only = {
21432 .cpuid = native_cpuid,
21433 .get_debugreg = native_get_debugreg,
21434 .set_debugreg = native_set_debugreg,
21435@@ -395,21 +402,26 @@ struct pv_cpu_ops pv_cpu_ops = {
21436 .end_context_switch = paravirt_nop,
21437 };
21438
21439-struct pv_apic_ops pv_apic_ops = {
21440+struct pv_apic_ops pv_apic_ops __read_only = {
21441 #ifdef CONFIG_X86_LOCAL_APIC
21442 .startup_ipi_hook = paravirt_nop,
21443 #endif
21444 };
21445
21446-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
21447+#ifdef CONFIG_X86_32
21448+#ifdef CONFIG_X86_PAE
21449+/* 64-bit pagetable entries */
21450+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
21451+#else
21452 /* 32-bit pagetable entries */
21453 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
21454+#endif
21455 #else
21456 /* 64-bit pagetable entries */
21457 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
21458 #endif
21459
21460-struct pv_mmu_ops pv_mmu_ops = {
21461+struct pv_mmu_ops pv_mmu_ops __read_only = {
21462
21463 .read_cr2 = native_read_cr2,
21464 .write_cr2 = native_write_cr2,
21465@@ -459,6 +471,7 @@ struct pv_mmu_ops pv_mmu_ops = {
21466 .make_pud = PTE_IDENT,
21467
21468 .set_pgd = native_set_pgd,
21469+ .set_pgd_batched = native_set_pgd_batched,
21470 #endif
21471 #endif /* PAGETABLE_LEVELS >= 3 */
21472
21473@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
21474 },
21475
21476 .set_fixmap = native_set_fixmap,
21477+
21478+#ifdef CONFIG_PAX_KERNEXEC
21479+ .pax_open_kernel = native_pax_open_kernel,
21480+ .pax_close_kernel = native_pax_close_kernel,
21481+#endif
21482+
21483 };
21484
21485 EXPORT_SYMBOL_GPL(pv_time_ops);
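Every pv_*_ops dispatch table in paravirt.c gains __read_only, closing off a classic kernel-exploit target: overwrite a paravirt hook, trigger it, run arbitrary code. The annotation is assumed to place the object in a section that is write-protected after boot, roughly:

	/* Assumed definition; the PaX patch supplies the real section name. */
	#define __read_only __attribute__((__section__(".data..read_only")))

Note also the PAE special case: with 64-bit PTEs on 32-bit kernels, _paravirt_ident_64 has to go through a callee-save thunk (PV_CALLEE_SAVE_REGS_THUNK), and paravirt_patch_default() learns to recognize the thunked symbol so the identity function can still be patched out.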
21486diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
21487index 35ccf75..7a15747 100644
21488--- a/arch/x86/kernel/pci-iommu_table.c
21489+++ b/arch/x86/kernel/pci-iommu_table.c
21490@@ -2,7 +2,7 @@
21491 #include <asm/iommu_table.h>
21492 #include <linux/string.h>
21493 #include <linux/kallsyms.h>
21494-
21495+#include <linux/sched.h>
21496
21497 #define DEBUG 1
21498
21499diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
21500index 6c483ba..d10ce2f 100644
21501--- a/arch/x86/kernel/pci-swiotlb.c
21502+++ b/arch/x86/kernel/pci-swiotlb.c
21503@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
21504 void *vaddr, dma_addr_t dma_addr,
21505 struct dma_attrs *attrs)
21506 {
21507- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
21508+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
21509 }
21510
21511 static struct dma_map_ops swiotlb_dma_ops = {
21512diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
21513index 2ed787f..f70c9f6 100644
21514--- a/arch/x86/kernel/process.c
21515+++ b/arch/x86/kernel/process.c
21516@@ -36,7 +36,8 @@
21517 * section. Since TSS's are completely CPU-local, we want them
21518 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
21519 */
21520-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
21521+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
21522+EXPORT_SYMBOL(init_tss);
21523
21524 #ifdef CONFIG_X86_64
21525 static DEFINE_PER_CPU(unsigned char, is_idle);
21526@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
21527 task_xstate_cachep =
21528 kmem_cache_create("task_xstate", xstate_size,
21529 __alignof__(union thread_xstate),
21530- SLAB_PANIC | SLAB_NOTRACK, NULL);
21531+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
21532 }
21533
21534 /*
21535@@ -105,7 +106,7 @@ void exit_thread(void)
21536 unsigned long *bp = t->io_bitmap_ptr;
21537
21538 if (bp) {
21539- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
21540+ struct tss_struct *tss = init_tss + get_cpu();
21541
21542 t->io_bitmap_ptr = NULL;
21543 clear_thread_flag(TIF_IO_BITMAP);
21544@@ -136,7 +137,7 @@ void show_regs_common(void)
21545 board = dmi_get_system_info(DMI_BOARD_NAME);
21546
21547 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s %s%s%s\n",
21548- current->pid, current->comm, print_tainted(),
21549+ task_pid_nr(current), current->comm, print_tainted(),
21550 init_utsname()->release,
21551 (int)strcspn(init_utsname()->version, " "),
21552 init_utsname()->version,
21553@@ -149,6 +150,9 @@ void flush_thread(void)
21554 {
21555 struct task_struct *tsk = current;
21556
21557+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
21558+ loadsegment(gs, 0);
21559+#endif
21560 flush_ptrace_hw_breakpoint(tsk);
21561 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
21562 drop_init_fpu(tsk);
21563@@ -301,7 +305,7 @@ static void __exit_idle(void)
21564 void exit_idle(void)
21565 {
21566 /* idle loop has pid 0 */
21567- if (current->pid)
21568+ if (task_pid_nr(current))
21569 return;
21570 __exit_idle();
21571 }
21572@@ -404,7 +408,7 @@ bool set_pm_idle_to_default(void)
21573
21574 return ret;
21575 }
21576-void stop_this_cpu(void *dummy)
21577+__noreturn void stop_this_cpu(void *dummy)
21578 {
21579 local_irq_disable();
21580 /*
21581@@ -632,16 +636,37 @@ static int __init idle_setup(char *str)
21582 }
21583 early_param("idle", idle_setup);
21584
21585-unsigned long arch_align_stack(unsigned long sp)
21586+#ifdef CONFIG_PAX_RANDKSTACK
21587+void pax_randomize_kstack(struct pt_regs *regs)
21588 {
21589- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
21590- sp -= get_random_int() % 8192;
21591- return sp & ~0xf;
21592-}
21593+ struct thread_struct *thread = &current->thread;
21594+ unsigned long time;
21595
21596-unsigned long arch_randomize_brk(struct mm_struct *mm)
21597-{
21598- unsigned long range_end = mm->brk + 0x02000000;
21599- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
21600-}
21601+ if (!randomize_va_space)
21602+ return;
21603+
21604+ if (v8086_mode(regs))
21605+ return;
21606
21607+ rdtscl(time);
21608+
21609+ /* P4 seems to return a 0 LSB, ignore it */
21610+#ifdef CONFIG_MPENTIUM4
21611+ time &= 0x3EUL;
21612+ time <<= 2;
21613+#elif defined(CONFIG_X86_64)
21614+ time &= 0xFUL;
21615+ time <<= 4;
21616+#else
21617+ time &= 0x1FUL;
21618+ time <<= 3;
21619+#endif
21620+
21621+ thread->sp0 ^= time;
21622+ load_sp0(init_tss + smp_processor_id(), thread);
21623+
21624+#ifdef CONFIG_X86_64
21625+ this_cpu_write(kernel_stack, thread->sp0);
21626+#endif
21627+}
21628+#endif
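pax_randomize_kstack() replaces the removed arch_align_stack()/arch_randomize_brk() helpers with per-syscall kernel stack randomization: low TSC bits are masked, scaled, and XORed into thread->sp0 before the TSS (and, on amd64, the kernel_stack per-CPU variable) is reloaded. On x86_64 the 0xFUL mask and << 4 shift yield 16 possible stack tops at 16-byte alignment, a 256-byte window; the Pentium 4 variant first discards the TSC's always-zero low bit. The arithmetic, isolated:

	/* Worked example of the x86_64 masking above. */
	static unsigned long randkstack_delta(unsigned long tsc)
	{
		tsc &= 0xFUL;	/* keep 4 bits -> 16 values          */
		tsc <<= 4;	/* scale by 16 -> 0..240, step 16    */
		return tsc;	/* XORed into thread->sp0 above      */
	}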
21629diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
21630index b5a8905..d9cacac 100644
21631--- a/arch/x86/kernel/process_32.c
21632+++ b/arch/x86/kernel/process_32.c
21633@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
21634 unsigned long thread_saved_pc(struct task_struct *tsk)
21635 {
21636 return ((unsigned long *)tsk->thread.sp)[3];
21637+//XXX return tsk->thread.eip;
21638 }
21639
21640 void __show_regs(struct pt_regs *regs, int all)
21641@@ -74,21 +75,20 @@ void __show_regs(struct pt_regs *regs, int all)
21642 unsigned long sp;
21643 unsigned short ss, gs;
21644
21645- if (user_mode_vm(regs)) {
21646+ if (user_mode(regs)) {
21647 sp = regs->sp;
21648 ss = regs->ss & 0xffff;
21649- gs = get_user_gs(regs);
21650 } else {
21651 sp = kernel_stack_pointer(regs);
21652 savesegment(ss, ss);
21653- savesegment(gs, gs);
21654 }
21655+ gs = get_user_gs(regs);
21656
21657 show_regs_common();
21658
21659 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
21660 (u16)regs->cs, regs->ip, regs->flags,
21661- smp_processor_id());
21662+ raw_smp_processor_id());
21663 print_symbol("EIP is at %s\n", regs->ip);
21664
21665 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
21666@@ -130,20 +130,21 @@ void release_thread(struct task_struct *dead_task)
21667 int copy_thread(unsigned long clone_flags, unsigned long sp,
21668 unsigned long arg, struct task_struct *p)
21669 {
21670- struct pt_regs *childregs = task_pt_regs(p);
21671+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
21672 struct task_struct *tsk;
21673 int err;
21674
21675 p->thread.sp = (unsigned long) childregs;
21676 p->thread.sp0 = (unsigned long) (childregs+1);
21677+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
21678
21679 if (unlikely(p->flags & PF_KTHREAD)) {
21680 /* kernel thread */
21681 memset(childregs, 0, sizeof(struct pt_regs));
21682 p->thread.ip = (unsigned long) ret_from_kernel_thread;
21683- task_user_gs(p) = __KERNEL_STACK_CANARY;
21684- childregs->ds = __USER_DS;
21685- childregs->es = __USER_DS;
21686+ savesegment(gs, childregs->gs);
21687+ childregs->ds = __KERNEL_DS;
21688+ childregs->es = __KERNEL_DS;
21689 childregs->fs = __KERNEL_PERCPU;
21690 childregs->bx = sp; /* function */
21691 childregs->bp = arg;
21692@@ -250,7 +251,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
21693 struct thread_struct *prev = &prev_p->thread,
21694 *next = &next_p->thread;
21695 int cpu = smp_processor_id();
21696- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21697+ struct tss_struct *tss = init_tss + cpu;
21698 fpu_switch_t fpu;
21699
21700 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
21701@@ -274,6 +275,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
21702 */
21703 lazy_save_gs(prev->gs);
21704
21705+#ifdef CONFIG_PAX_MEMORY_UDEREF
21706+ __set_fs(task_thread_info(next_p)->addr_limit);
21707+#endif
21708+
21709 /*
21710 * Load the per-thread Thread-Local Storage descriptor.
21711 */
21712@@ -304,6 +309,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
21713 */
21714 arch_end_context_switch(next_p);
21715
21716+ this_cpu_write(current_task, next_p);
21717+ this_cpu_write(current_tinfo, &next_p->tinfo);
21718+
21719 /*
21720 * Restore %gs if needed (which is common)
21721 */
21722@@ -312,8 +320,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
21723
21724 switch_fpu_finish(next_p, fpu);
21725
21726- this_cpu_write(current_task, next_p);
21727-
21728 return prev_p;
21729 }
21730
21731@@ -343,4 +349,3 @@ unsigned long get_wchan(struct task_struct *p)
21732 } while (count++ < 16);
21733 return 0;
21734 }
21735-
21736diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
21737index 6e68a61..955a9a5 100644
21738--- a/arch/x86/kernel/process_64.c
21739+++ b/arch/x86/kernel/process_64.c
21740@@ -152,10 +152,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
21741 struct pt_regs *childregs;
21742 struct task_struct *me = current;
21743
21744- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
21745+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
21746 childregs = task_pt_regs(p);
21747 p->thread.sp = (unsigned long) childregs;
21748 p->thread.usersp = me->thread.usersp;
21749+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
21750 set_tsk_thread_flag(p, TIF_FORK);
21751 p->fpu_counter = 0;
21752 p->thread.io_bitmap_ptr = NULL;
21753@@ -274,7 +275,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
21754 struct thread_struct *prev = &prev_p->thread;
21755 struct thread_struct *next = &next_p->thread;
21756 int cpu = smp_processor_id();
21757- struct tss_struct *tss = &per_cpu(init_tss, cpu);
21758+ struct tss_struct *tss = init_tss + cpu;
21759 unsigned fsindex, gsindex;
21760 fpu_switch_t fpu;
21761
21762@@ -356,10 +357,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
21763 prev->usersp = this_cpu_read(old_rsp);
21764 this_cpu_write(old_rsp, next->usersp);
21765 this_cpu_write(current_task, next_p);
21766+ this_cpu_write(current_tinfo, &next_p->tinfo);
21767
21768- this_cpu_write(kernel_stack,
21769- (unsigned long)task_stack_page(next_p) +
21770- THREAD_SIZE - KERNEL_STACK_OFFSET);
21771+ this_cpu_write(kernel_stack, next->sp0);
21772
21773 /*
21774 * Now maybe reload the debug registers and handle I/O bitmaps
21775@@ -428,12 +428,11 @@ unsigned long get_wchan(struct task_struct *p)
21776 if (!p || p == current || p->state == TASK_RUNNING)
21777 return 0;
21778 stack = (unsigned long)task_stack_page(p);
21779- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
21780+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
21781 return 0;
21782 fp = *(u64 *)(p->thread.sp);
21783 do {
21784- if (fp < (unsigned long)stack ||
21785- fp >= (unsigned long)stack+THREAD_SIZE)
21786+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
21787 return 0;
21788 ip = *(u64 *)(fp+8);
21789 if (!in_sched_functions(ip))
21790diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
21791index b629bbe..0fa615a 100644
21792--- a/arch/x86/kernel/ptrace.c
21793+++ b/arch/x86/kernel/ptrace.c
21794@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
21795 {
21796 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
21797 unsigned long sp = (unsigned long)&regs->sp;
21798- struct thread_info *tinfo;
21799
21800- if (context == (sp & ~(THREAD_SIZE - 1)))
21801+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
21802 return sp;
21803
21804- tinfo = (struct thread_info *)context;
21805- if (tinfo->previous_esp)
21806- return tinfo->previous_esp;
21807+ sp = *(unsigned long *)context;
21808+ if (sp)
21809+ return sp;
21810
21811 return (unsigned long)regs;
21812 }
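
The rewritten kernel_stack_pointer() above leans on two facts: THREAD_SIZE is a power of two, so masking any in-stack address with ~(THREAD_SIZE - 1) yields the base of that stack, and, under this patch, the previous stack pointer is stored in the first word at that base rather than in a thread_info field. A minimal userspace sketch of the masking arithmetic, with made-up THREAD_SIZE and addresses:

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192UL              /* must be a power of two */

static uintptr_t stack_base(uintptr_t addr)
{
        return addr & ~(THREAD_SIZE - 1);
}

int main(void)
{
        uintptr_t regs = 0xc1234f80;    /* pretend pt_regs address */
        uintptr_t sp   = 0xc1234e10;    /* pretend saved sp */

        /* same THREAD_SIZE-aligned region => sp is on this stack */
        printf("same stack: %d\n", stack_base(regs) == stack_base(sp));
        return 0;
}
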
21813@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
21814 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
21815 {
21816 int i;
21817- int dr7 = 0;
21818+ unsigned long dr7 = 0;
21819 struct arch_hw_breakpoint *info;
21820
21821 for (i = 0; i < HBP_NUM; i++) {
21822@@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
21823 unsigned long addr, unsigned long data)
21824 {
21825 int ret;
21826- unsigned long __user *datap = (unsigned long __user *)data;
21827+ unsigned long __user *datap = (__force unsigned long __user *)data;
21828
21829 switch (request) {
21830 /* read the word at location addr in the USER area. */
21831@@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
21832 if ((int) addr < 0)
21833 return -EIO;
21834 ret = do_get_thread_area(child, addr,
21835- (struct user_desc __user *)data);
21836+ (__force struct user_desc __user *) data);
21837 break;
21838
21839 case PTRACE_SET_THREAD_AREA:
21840 if ((int) addr < 0)
21841 return -EIO;
21842 ret = do_set_thread_area(child, addr,
21843- (struct user_desc __user *)data, 0);
21844+ (__force struct user_desc __user *) data, 0);
21845 break;
21846 #endif
21847
21848@@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
21849
21850 #ifdef CONFIG_X86_64
21851
21852-static struct user_regset x86_64_regsets[] __read_mostly = {
21853+static user_regset_no_const x86_64_regsets[] __read_only = {
21854 [REGSET_GENERAL] = {
21855 .core_note_type = NT_PRSTATUS,
21856 .n = sizeof(struct user_regs_struct) / sizeof(long),
21857@@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
21858 #endif /* CONFIG_X86_64 */
21859
21860 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
21861-static struct user_regset x86_32_regsets[] __read_mostly = {
21862+static user_regset_no_const x86_32_regsets[] __read_only = {
21863 [REGSET_GENERAL] = {
21864 .core_note_type = NT_PRSTATUS,
21865 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
21866@@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
21867 */
21868 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
21869
21870-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
21871+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
21872 {
21873 #ifdef CONFIG_X86_64
21874 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
21875@@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
21876 memset(info, 0, sizeof(*info));
21877 info->si_signo = SIGTRAP;
21878 info->si_code = si_code;
21879- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
21880+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
21881 }
21882
21883 void user_single_step_siginfo(struct task_struct *tsk,
21884@@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
21885 # define IS_IA32 0
21886 #endif
21887
21888+#ifdef CONFIG_GRKERNSEC_SETXID
21889+extern void gr_delayed_cred_worker(void);
21890+#endif
21891+
21892 /*
21893 * We must return the syscall number to actually look up in the table.
21894 * This can be -1L to skip running any syscall at all.
21895@@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
21896
21897 user_exit();
21898
21899+#ifdef CONFIG_GRKERNSEC_SETXID
21900+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
21901+ gr_delayed_cred_worker();
21902+#endif
21903+
21904 /*
21905 * If we stepped into a sysenter/syscall insn, it trapped in
21906 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
21907@@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
21908 */
21909 user_exit();
21910
21911+#ifdef CONFIG_GRKERNSEC_SETXID
21912+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
21913+ gr_delayed_cred_worker();
21914+#endif
21915+
21916 audit_syscall_exit(regs);
21917
21918 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
21919diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
21920index 85c3959..76b89f9 100644
21921--- a/arch/x86/kernel/pvclock.c
21922+++ b/arch/x86/kernel/pvclock.c
21923@@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
21924 return pv_tsc_khz;
21925 }
21926
21927-static atomic64_t last_value = ATOMIC64_INIT(0);
21928+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
21929
21930 void pvclock_resume(void)
21931 {
21932- atomic64_set(&last_value, 0);
21933+ atomic64_set_unchecked(&last_value, 0);
21934 }
21935
21936 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
21937@@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
21938 * updating at the same time, and one of them could be slightly behind,
21939 * making the assumption that last_value always go forward fail to hold.
21940 */
21941- last = atomic64_read(&last_value);
21942+ last = atomic64_read_unchecked(&last_value);
21943 do {
21944 if (ret < last)
21945 return last;
21946- last = atomic64_cmpxchg(&last_value, last, ret);
21947+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
21948 } while (unlikely(last != ret));
21949
21950 return ret;
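
The loop above enforces a forward-only clock: atomic64_cmpxchg publishes the new reading only if nothing later has been seen, and the switch to the *_unchecked flavour opts this counter out of PAX_REFCOUNT overflow trapping, since a free-running timestamp is not a reference count. A userspace sketch of the same compare-and-swap maximum, using C11 atomics rather than the kernel API (the names and standalone setting are illustrative):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t last_value;

/* Publish `raw` only if it is the latest reading seen so far;
 * otherwise return the newer value somebody else published. */
static uint64_t monotonic_read(uint64_t raw)
{
        uint64_t last = atomic_load(&last_value);

        do {
                if (raw < last)
                        return last;
        } while (!atomic_compare_exchange_weak(&last_value, &last, raw));
        return raw;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)monotonic_read(100)); /* 100 */
        printf("%llu\n", (unsigned long long)monotonic_read(90));  /* clamped: 100 */
        return 0;
}
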
21951diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
21952index 76fa1e9..abf09ea 100644
21953--- a/arch/x86/kernel/reboot.c
21954+++ b/arch/x86/kernel/reboot.c
21955@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
21956 EXPORT_SYMBOL(pm_power_off);
21957
21958 static const struct desc_ptr no_idt = {};
21959-static int reboot_mode;
21960+static unsigned short reboot_mode;
21961 enum reboot_type reboot_type = BOOT_ACPI;
21962 int reboot_force;
21963
21964@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
21965
21966 void __noreturn machine_real_restart(unsigned int type)
21967 {
21968+
21969+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
21970+ struct desc_struct *gdt;
21971+#endif
21972+
21973 local_irq_disable();
21974
21975 /*
21976@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
21977
21978 /* Jump to the identity-mapped low memory code */
21979 #ifdef CONFIG_X86_32
21980- asm volatile("jmpl *%0" : :
21981+
21982+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
21983+ gdt = get_cpu_gdt_table(smp_processor_id());
21984+ pax_open_kernel();
21985+#ifdef CONFIG_PAX_MEMORY_UDEREF
21986+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
21987+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
21988+ loadsegment(ds, __KERNEL_DS);
21989+ loadsegment(es, __KERNEL_DS);
21990+ loadsegment(ss, __KERNEL_DS);
21991+#endif
21992+#ifdef CONFIG_PAX_KERNEXEC
21993+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
21994+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
21995+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
21996+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
21997+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
21998+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
21999+#endif
22000+ pax_close_kernel();
22001+#endif
22002+
22003+ asm volatile("ljmpl *%0" : :
22004 "rm" (real_mode_header->machine_real_restart_asm),
22005 "a" (type));
22006 #else
22007@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
22008 * try to force a triple fault and then cycle between hitting the keyboard
22009 * controller and doing that
22010 */
22011-static void native_machine_emergency_restart(void)
22012+static void __noreturn native_machine_emergency_restart(void)
22013 {
22014 int i;
22015 int attempt = 0;
22016@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
22017 #endif
22018 }
22019
22020-static void __machine_emergency_restart(int emergency)
22021+static void __noreturn __machine_emergency_restart(int emergency)
22022 {
22023 reboot_emergency = emergency;
22024 machine_ops.emergency_restart();
22025 }
22026
22027-static void native_machine_restart(char *__unused)
22028+static void __noreturn native_machine_restart(char *__unused)
22029 {
22030 pr_notice("machine restart\n");
22031
22032@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
22033 __machine_emergency_restart(0);
22034 }
22035
22036-static void native_machine_halt(void)
22037+static void __noreturn native_machine_halt(void)
22038 {
22039 /* Stop other cpus and apics */
22040 machine_shutdown();
22041@@ -679,7 +706,7 @@ static void native_machine_halt(void)
22042 stop_this_cpu(NULL);
22043 }
22044
22045-static void native_machine_power_off(void)
22046+static void __noreturn native_machine_power_off(void)
22047 {
22048 if (pm_power_off) {
22049 if (!reboot_force)
22050@@ -688,9 +715,10 @@ static void native_machine_power_off(void)
22051 }
22052 /* A fallback in case there is no PM info available */
22053 tboot_shutdown(TB_SHUTDOWN_HALT);
22054+ unreachable();
22055 }
22056
22057-struct machine_ops machine_ops = {
22058+struct machine_ops machine_ops __read_only = {
22059 .power_off = native_machine_power_off,
22060 .shutdown = native_machine_shutdown,
22061 .emergency_restart = native_machine_emergency_restart,
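
The machine_real_restart() hunk above has to undo the PaX segment setup before the far jump into identity-mapped low memory: under UDEREF the kernel data segment is widened back out and reloaded, and under KERNEXEC the kernel code segment is rebased to 0 with a 4 GiB page-granular limit, which is why the descriptor's split base0/base1/base2 and limit0/limit fields are written piecemeal. A sketch of how those fields assemble, following the architectural descriptor format rather than any particular kernel header:

#include <stdint.h>
#include <stdio.h>

/* Simplified mirror of one 8-byte GDT entry. */
struct gdt_desc {
        uint16_t limit0;
        uint16_t base0;
        unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
        unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
} __attribute__((packed));

static uint32_t desc_base(const struct gdt_desc *d)
{
        return d->base0 | ((uint32_t)d->base1 << 16) | ((uint32_t)d->base2 << 24);
}

static uint32_t desc_limit(const struct gdt_desc *d)
{
        uint32_t limit = d->limit0 | ((uint32_t)d->limit << 16);

        return d->g ? (limit << 12) | 0xfff : limit; /* g=1: 4 KiB units */
}

int main(void)
{
        /* what the KERNEXEC branch rebuilds: base 0, limit 4 GiB - 1 */
        struct gdt_desc d = { .limit0 = 0xffff, .limit = 0xf, .g = 1 };

        printf("base=%#x limit=%#x\n",
               (unsigned)desc_base(&d), (unsigned)desc_limit(&d));
        return 0;
}
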
22062diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
22063index 7a6f3b3..bed145d7 100644
22064--- a/arch/x86/kernel/relocate_kernel_64.S
22065+++ b/arch/x86/kernel/relocate_kernel_64.S
22066@@ -11,6 +11,7 @@
22067 #include <asm/kexec.h>
22068 #include <asm/processor-flags.h>
22069 #include <asm/pgtable_types.h>
22070+#include <asm/alternative-asm.h>
22071
22072 /*
22073 * Must be relocatable PIC code callable as a C function
22074@@ -160,13 +161,14 @@ identity_mapped:
22075 xorq %rbp, %rbp
22076 xorq %r8, %r8
22077 xorq %r9, %r9
22078- xorq %r10, %r9
22079+ xorq %r10, %r10
22080 xorq %r11, %r11
22081 xorq %r12, %r12
22082 xorq %r13, %r13
22083 xorq %r14, %r14
22084 xorq %r15, %r15
22085
22086+ pax_force_retaddr 0, 1
22087 ret
22088
22089 1:
22090diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
22091index 8b24289..d37b58b 100644
22092--- a/arch/x86/kernel/setup.c
22093+++ b/arch/x86/kernel/setup.c
22094@@ -437,7 +437,7 @@ static void __init parse_setup_data(void)
22095
22096 switch (data->type) {
22097 case SETUP_E820_EXT:
22098- parse_e820_ext(data);
22099+ parse_e820_ext((struct setup_data __force_kernel *)data);
22100 break;
22101 case SETUP_DTB:
22102 add_dtb(pa_data);
22103@@ -706,7 +706,7 @@ static void __init trim_bios_range(void)
22104 * area (640->1Mb) as ram even though it is not.
22105 * take them out.
22106 */
22107- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
22108+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
22109
22110 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
22111 }
22112@@ -830,14 +830,14 @@ void __init setup_arch(char **cmdline_p)
22113
22114 if (!boot_params.hdr.root_flags)
22115 root_mountflags &= ~MS_RDONLY;
22116- init_mm.start_code = (unsigned long) _text;
22117- init_mm.end_code = (unsigned long) _etext;
22118+ init_mm.start_code = ktla_ktva((unsigned long) _text);
22119+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
22120 init_mm.end_data = (unsigned long) _edata;
22121 init_mm.brk = _brk_end;
22122
22123- code_resource.start = virt_to_phys(_text);
22124- code_resource.end = virt_to_phys(_etext)-1;
22125- data_resource.start = virt_to_phys(_etext);
22126+ code_resource.start = virt_to_phys(ktla_ktva(_text));
22127+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
22128+ data_resource.start = virt_to_phys(_sdata);
22129 data_resource.end = virt_to_phys(_edata)-1;
22130 bss_resource.start = virt_to_phys(&__bss_start);
22131 bss_resource.end = virt_to_phys(&__bss_stop)-1;
22132diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
22133index 5cdff03..5810740 100644
22134--- a/arch/x86/kernel/setup_percpu.c
22135+++ b/arch/x86/kernel/setup_percpu.c
22136@@ -21,19 +21,17 @@
22137 #include <asm/cpu.h>
22138 #include <asm/stackprotector.h>
22139
22140-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
22141+#ifdef CONFIG_SMP
22142+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
22143 EXPORT_PER_CPU_SYMBOL(cpu_number);
22144+#endif
22145
22146-#ifdef CONFIG_X86_64
22147 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
22148-#else
22149-#define BOOT_PERCPU_OFFSET 0
22150-#endif
22151
22152 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
22153 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
22154
22155-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
22156+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
22157 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
22158 };
22159 EXPORT_SYMBOL(__per_cpu_offset);
22160@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
22161 {
22162 #ifdef CONFIG_X86_32
22163 struct desc_struct gdt;
22164+ unsigned long base = per_cpu_offset(cpu);
22165
22166- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
22167- 0x2 | DESCTYPE_S, 0x8);
22168- gdt.s = 1;
22169+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
22170+ 0x83 | DESCTYPE_S, 0xC);
22171 write_gdt_entry(get_cpu_gdt_table(cpu),
22172 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
22173 #endif
22174@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
22175 /* alrighty, percpu areas up and running */
22176 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
22177 for_each_possible_cpu(cpu) {
22178+#ifdef CONFIG_CC_STACKPROTECTOR
22179+#ifdef CONFIG_X86_32
22180+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
22181+#endif
22182+#endif
22183 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
22184 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
22185 per_cpu(cpu_number, cpu) = cpu;
22186@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
22187 */
22188 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
22189 #endif
22190+#ifdef CONFIG_CC_STACKPROTECTOR
22191+#ifdef CONFIG_X86_32
22192+ if (!cpu)
22193+ per_cpu(stack_canary.canary, cpu) = canary;
22194+#endif
22195+#endif
22196 /*
22197 * Up to this point, the boot CPU has been using .init.data
22198 * area. Reload any changed state for the boot CPU.
22199diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
22200index d6bf1f3..3ffce5a 100644
22201--- a/arch/x86/kernel/signal.c
22202+++ b/arch/x86/kernel/signal.c
22203@@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
22204 * Align the stack pointer according to the i386 ABI,
22205 * i.e. so that on function entry ((sp + 4) & 15) == 0.
22206 */
22207- sp = ((sp + 4) & -16ul) - 4;
22208+ sp = ((sp - 12) & -16ul) - 4;
22209 #else /* !CONFIG_X86_32 */
22210 sp = round_down(sp, 16) - 8;
22211 #endif
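
Both forms of the i386 alignment above satisfy the ABI requirement that ((sp + 4) & 15) == 0 on handler entry; since (sp - 12) == (sp + 4) - 16 and subtracting a multiple of 16 commutes with the mask, the patched expression lands exactly 16 bytes lower, so the frame always starts strictly below the incoming sp. A quick demonstration (addresses arbitrary):

#include <stdio.h>

static unsigned long align_old(unsigned long sp)
{
        return ((sp + 4) & -16UL) - 4;
}

static unsigned long align_new(unsigned long sp)
{
        return ((sp - 12) & -16UL) - 4;
}

int main(void)
{
        unsigned long sp;

        for (sp = 0xbffff0a8; sp < 0xbffff0b0; sp++)
                printf("sp=%#lx old=%#lx new=%#lx entry mod 16=%lu\n",
                       sp, align_old(sp), align_new(sp),
                       (align_new(sp) + 4) & 15);
        return 0;
}
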
22212@@ -304,9 +304,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
22213 }
22214
22215 if (current->mm->context.vdso)
22216- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22217+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22218 else
22219- restorer = &frame->retcode;
22220+ restorer = (void __user *)&frame->retcode;
22221 if (ka->sa.sa_flags & SA_RESTORER)
22222 restorer = ka->sa.sa_restorer;
22223
22224@@ -320,7 +320,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
22225 * reasons and because gdb uses it as a signature to notice
22226 * signal handler stack frames.
22227 */
22228- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
22229+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
22230
22231 if (err)
22232 return -EFAULT;
22233@@ -367,7 +367,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
22234 err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
22235
22236 /* Set up to return from userspace. */
22237- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22238+ if (current->mm->context.vdso)
22239+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22240+ else
22241+ restorer = (void __user *)&frame->retcode;
22242 if (ka->sa.sa_flags & SA_RESTORER)
22243 restorer = ka->sa.sa_restorer;
22244 put_user_ex(restorer, &frame->pretcode);
22245@@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
22246 * reasons and because gdb uses it as a signature to notice
22247 * signal handler stack frames.
22248 */
22249- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
22250+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
22251 } put_user_catch(err);
22252
22253 err |= copy_siginfo_to_user(&frame->info, info);
22254diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
22255index 48d2b7d..90d328a 100644
22256--- a/arch/x86/kernel/smp.c
22257+++ b/arch/x86/kernel/smp.c
22258@@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
22259
22260 __setup("nonmi_ipi", nonmi_ipi_setup);
22261
22262-struct smp_ops smp_ops = {
22263+struct smp_ops smp_ops __read_only = {
22264 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
22265 .smp_prepare_cpus = native_smp_prepare_cpus,
22266 .smp_cpus_done = native_smp_cpus_done,
22267diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
22268index ed0fe38..87fc692 100644
22269--- a/arch/x86/kernel/smpboot.c
22270+++ b/arch/x86/kernel/smpboot.c
22271@@ -748,6 +748,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22272 idle->thread.sp = (unsigned long) (((struct pt_regs *)
22273 (THREAD_SIZE + task_stack_page(idle))) - 1);
22274 per_cpu(current_task, cpu) = idle;
22275+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
22276
22277 #ifdef CONFIG_X86_32
22278 /* Stack for startup_32 can be just as for start_secondary onwards */
22279@@ -755,11 +756,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22280 #else
22281 clear_tsk_thread_flag(idle, TIF_FORK);
22282 initial_gs = per_cpu_offset(cpu);
22283- per_cpu(kernel_stack, cpu) =
22284- (unsigned long)task_stack_page(idle) -
22285- KERNEL_STACK_OFFSET + THREAD_SIZE;
22286+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22287 #endif
22288+
22289+ pax_open_kernel();
22290 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
22291+ pax_close_kernel();
22292+
22293 initial_code = (unsigned long)start_secondary;
22294 stack_start = idle->thread.sp;
22295
22296@@ -908,6 +911,12 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
22297 /* the FPU context is blank, nobody can own it */
22298 __cpu_disable_lazy_restore(cpu);
22299
22300+#ifdef CONFIG_PAX_PER_CPU_PGD
22301+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
22302+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22303+ KERNEL_PGD_PTRS);
22304+#endif
22305+
22309 err = do_boot_cpu(apicid, cpu, tidle);
22310 if (err) {
22311 pr_debug("do_boot_cpu failed %d\n", err);
22312diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
22313index 9b4d51d..5d28b58 100644
22314--- a/arch/x86/kernel/step.c
22315+++ b/arch/x86/kernel/step.c
22316@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
22317 struct desc_struct *desc;
22318 unsigned long base;
22319
22320- seg &= ~7UL;
22321+ seg >>= 3;
22322
22323 mutex_lock(&child->mm->context.lock);
22324- if (unlikely((seg >> 3) >= child->mm->context.size))
22325+ if (unlikely(seg >= child->mm->context.size))
22326 addr = -1L; /* bogus selector, access would fault */
22327 else {
22328 desc = child->mm->context.ldt + seg;
22329@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
22330 addr += base;
22331 }
22332 mutex_unlock(&child->mm->context.lock);
22333- }
22334+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
22335+ addr = ktla_ktva(addr);
22336
22337 return addr;
22338 }
22339@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
22340 unsigned char opcode[15];
22341 unsigned long addr = convert_ip_to_linear(child, regs);
22342
22343+ if (addr == -1L)
22344+ return 0;
22345+
22346 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
22347 for (i = 0; i < copied; i++) {
22348 switch (opcode[i]) {
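
convert_ip_to_linear() above now treats the selector as an index: an x86 segment selector packs the RPL in bits 0-1, the table indicator in bit 2, and the descriptor index in bits 3 and up, so seg >>= 3 yields something directly comparable against the LDT size. A small decoder (selector value arbitrary):

#include <stdio.h>

int main(void)
{
        unsigned sel = 0x004f;          /* arbitrary example selector */

        printf("index=%u ti=%u rpl=%u\n",
               sel >> 3,                /* descriptor table index */
               (sel >> 2) & 1,          /* 0 = GDT, 1 = LDT */
               sel & 3);                /* requested privilege level */
        return 0;
}
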
22349diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
22350new file mode 100644
22351index 0000000..26bb1af
22352--- /dev/null
22353+++ b/arch/x86/kernel/sys_i386_32.c
22354@@ -0,0 +1,249 @@
22355+/*
22356+ * This file contains various random system calls that
22357+ * have a non-standard calling sequence on the Linux/i386
22358+ * platform.
22359+ */
22360+
22361+#include <linux/errno.h>
22362+#include <linux/sched.h>
22363+#include <linux/mm.h>
22364+#include <linux/fs.h>
22365+#include <linux/smp.h>
22366+#include <linux/sem.h>
22367+#include <linux/msg.h>
22368+#include <linux/shm.h>
22369+#include <linux/stat.h>
22370+#include <linux/syscalls.h>
22371+#include <linux/mman.h>
22372+#include <linux/file.h>
22373+#include <linux/utsname.h>
22374+#include <linux/ipc.h>
22375+
22376+#include <linux/uaccess.h>
22377+#include <linux/unistd.h>
22378+
22379+#include <asm/syscalls.h>
22380+
22381+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
22382+{
22383+ unsigned long pax_task_size = TASK_SIZE;
22384+
22385+#ifdef CONFIG_PAX_SEGMEXEC
22386+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
22387+ pax_task_size = SEGMEXEC_TASK_SIZE;
22388+#endif
22389+
22390+ if (len > pax_task_size || addr > pax_task_size - len)
22391+ return -EINVAL;
22392+
22393+ return 0;
22394+}
22395+
22396+unsigned long
22397+arch_get_unmapped_area(struct file *filp, unsigned long addr,
22398+ unsigned long len, unsigned long pgoff, unsigned long flags)
22399+{
22400+ struct mm_struct *mm = current->mm;
22401+ struct vm_area_struct *vma;
22402+ unsigned long start_addr, pax_task_size = TASK_SIZE;
22403+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
22404+
22405+#ifdef CONFIG_PAX_SEGMEXEC
22406+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22407+ pax_task_size = SEGMEXEC_TASK_SIZE;
22408+#endif
22409+
22410+ pax_task_size -= PAGE_SIZE;
22411+
22412+ if (len > pax_task_size)
22413+ return -ENOMEM;
22414+
22415+ if (flags & MAP_FIXED)
22416+ return addr;
22417+
22418+#ifdef CONFIG_PAX_RANDMMAP
22419+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22420+#endif
22421+
22422+ if (addr) {
22423+ addr = PAGE_ALIGN(addr);
22424+ if (pax_task_size - len >= addr) {
22425+ vma = find_vma(mm, addr);
22426+ if (check_heap_stack_gap(vma, addr, len, offset))
22427+ return addr;
22428+ }
22429+ }
22430+ if (len > mm->cached_hole_size) {
22431+ start_addr = addr = mm->free_area_cache;
22432+ } else {
22433+ start_addr = addr = mm->mmap_base;
22434+ mm->cached_hole_size = 0;
22435+ }
22436+
22437+#ifdef CONFIG_PAX_PAGEEXEC
22438+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
22439+ start_addr = 0x00110000UL;
22440+
22441+#ifdef CONFIG_PAX_RANDMMAP
22442+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22443+ start_addr += mm->delta_mmap & 0x03FFF000UL;
22444+#endif
22445+
22446+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
22447+ start_addr = addr = mm->mmap_base;
22448+ else
22449+ addr = start_addr;
22450+ }
22451+#endif
22452+
22453+full_search:
22454+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
22455+ /* At this point: (!vma || addr < vma->vm_end). */
22456+ if (pax_task_size - len < addr) {
22457+ /*
22458+ * Start a new search - just in case we missed
22459+ * some holes.
22460+ */
22461+ if (start_addr != mm->mmap_base) {
22462+ start_addr = addr = mm->mmap_base;
22463+ mm->cached_hole_size = 0;
22464+ goto full_search;
22465+ }
22466+ return -ENOMEM;
22467+ }
22468+ if (check_heap_stack_gap(vma, addr, len, offset))
22469+ break;
22470+ if (addr + mm->cached_hole_size < vma->vm_start)
22471+ mm->cached_hole_size = vma->vm_start - addr;
22472+ addr = vma->vm_end;
22473+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
22474+ start_addr = addr = mm->mmap_base;
22475+ mm->cached_hole_size = 0;
22476+ goto full_search;
22477+ }
22478+ }
22479+
22480+ /*
22481+ * Remember the place where we stopped the search:
22482+ */
22483+ mm->free_area_cache = addr + len;
22484+ return addr;
22485+}
22486+
22487+unsigned long
22488+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
22489+ const unsigned long len, const unsigned long pgoff,
22490+ const unsigned long flags)
22491+{
22492+ struct vm_area_struct *vma;
22493+ struct mm_struct *mm = current->mm;
22494+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
22495+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
22496+
22497+#ifdef CONFIG_PAX_SEGMEXEC
22498+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22499+ pax_task_size = SEGMEXEC_TASK_SIZE;
22500+#endif
22501+
22502+ pax_task_size -= PAGE_SIZE;
22503+
22504+ /* requested length too big for entire address space */
22505+ if (len > pax_task_size)
22506+ return -ENOMEM;
22507+
22508+ if (flags & MAP_FIXED)
22509+ return addr;
22510+
22511+#ifdef CONFIG_PAX_PAGEEXEC
22512+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
22513+ goto bottomup;
22514+#endif
22515+
22516+#ifdef CONFIG_PAX_RANDMMAP
22517+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22518+#endif
22519+
22520+ /* requesting a specific address */
22521+ if (addr) {
22522+ addr = PAGE_ALIGN(addr);
22523+ if (pax_task_size - len >= addr) {
22524+ vma = find_vma(mm, addr);
22525+ if (check_heap_stack_gap(vma, addr, len, offset))
22526+ return addr;
22527+ }
22528+ }
22529+
22530+ /* check if free_area_cache is useful for us */
22531+ if (len <= mm->cached_hole_size) {
22532+ mm->cached_hole_size = 0;
22533+ mm->free_area_cache = mm->mmap_base;
22534+ }
22535+
22536+ /* either no address requested or can't fit in requested address hole */
22537+ addr = mm->free_area_cache;
22538+
22539+ /* make sure it can fit in the remaining address space */
22540+ if (addr > len) {
22541+ vma = find_vma(mm, addr-len);
22542+ if (check_heap_stack_gap(vma, addr - len, len, offset))
22543+ /* remember the address as a hint for next time */
22544+ return (mm->free_area_cache = addr-len);
22545+ }
22546+
22547+ if (mm->mmap_base < len)
22548+ goto bottomup;
22549+
22550+ addr = mm->mmap_base-len;
22551+
22552+ do {
22553+ /*
22554+ * Lookup failure means no vma is above this address,
22555+ * else if new region fits below vma->vm_start,
22556+ * return with success:
22557+ */
22558+ vma = find_vma(mm, addr);
22559+ if (check_heap_stack_gap(vma, addr, len, offset))
22560+ /* remember the address as a hint for next time */
22561+ return (mm->free_area_cache = addr);
22562+
22563+ /* remember the largest hole we saw so far */
22564+ if (addr + mm->cached_hole_size < vma->vm_start)
22565+ mm->cached_hole_size = vma->vm_start - addr;
22566+
22567+ /* try just below the current vma->vm_start */
22568+ addr = skip_heap_stack_gap(vma, len, offset);
22569+ } while (!IS_ERR_VALUE(addr));
22570+
22571+bottomup:
22572+ /*
22573+ * A failed mmap() very likely causes application failure,
22574+ * so fall back to the bottom-up function here. This scenario
22575+ * can happen with large stack limits and large mmap()
22576+ * allocations.
22577+ */
22578+
22579+#ifdef CONFIG_PAX_SEGMEXEC
22580+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22581+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
22582+ else
22583+#endif
22584+
22585+ mm->mmap_base = TASK_UNMAPPED_BASE;
22586+
22587+#ifdef CONFIG_PAX_RANDMMAP
22588+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22589+ mm->mmap_base += mm->delta_mmap;
22590+#endif
22591+
22592+ mm->free_area_cache = mm->mmap_base;
22593+ mm->cached_hole_size = ~0UL;
22594+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
22595+ /*
22596+ * Restore the topdown base:
22597+ */
22598+ mm->mmap_base = base;
22599+ mm->free_area_cache = base;
22600+ mm->cached_hole_size = ~0UL;
22601+
22602+ return addr;
22603+}
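
The new file's arch_get_unmapped_area() is a first-fit walk: start at a base, skip forward past each vma that overlaps the candidate window, and restart once if the starting point was above mmap_base. A toy model of the core walk, without the cached_hole_size heuristic or the PaX gap checks (all values invented):

#include <stdio.h>
#include <stddef.h>

struct region { unsigned long start, end; };

/* regions[] plays the role of the sorted vma list; look for a hole of
 * at least `len` between base and limit. */
static unsigned long find_hole(const struct region *r, size_t n,
                               unsigned long base, unsigned long limit,
                               unsigned long len)
{
        unsigned long addr = base;
        size_t i;

        for (i = 0; i < n && addr + len <= limit; i++) {
                if (r[i].end <= addr)
                        continue;            /* region is below the cursor */
                if (addr + len <= r[i].start)
                        return addr;         /* hole before this region */
                addr = r[i].end;             /* skip past it and retry */
        }
        return addr + len <= limit ? addr : 0;
}

int main(void)
{
        struct region vmas[] = { { 0x10000, 0x20000 }, { 0x24000, 0x30000 } };

        printf("%#lx\n", find_hole(vmas, 2, 0x10000, 0x100000, 0x4000));
        return 0;
}
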
22604diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
22605index 97ef74b..57a1882 100644
22606--- a/arch/x86/kernel/sys_x86_64.c
22607+++ b/arch/x86/kernel/sys_x86_64.c
22608@@ -81,8 +81,8 @@ out:
22609 return error;
22610 }
22611
22612-static void find_start_end(unsigned long flags, unsigned long *begin,
22613- unsigned long *end)
22614+static void find_start_end(struct mm_struct *mm, unsigned long flags,
22615+ unsigned long *begin, unsigned long *end)
22616 {
22617 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
22618 unsigned long new_begin;
22619@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
22620 *begin = new_begin;
22621 }
22622 } else {
22623- *begin = TASK_UNMAPPED_BASE;
22624+ *begin = mm->mmap_base;
22625 *end = TASK_SIZE;
22626 }
22627 }
22628@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
22629 struct vm_area_struct *vma;
22630 struct vm_unmapped_area_info info;
22631 unsigned long begin, end;
22632+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
22633
22634 if (flags & MAP_FIXED)
22635 return addr;
22636
22637- find_start_end(flags, &begin, &end);
22638+ find_start_end(mm, flags, &begin, &end);
22639
22640 if (len > end)
22641 return -ENOMEM;
22642
22643+#ifdef CONFIG_PAX_RANDMMAP
22644+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22645+#endif
22646+
22647 if (addr) {
22648 addr = PAGE_ALIGN(addr);
22649 vma = find_vma(mm, addr);
22650- if (end - len >= addr &&
22651- (!vma || addr + len <= vma->vm_start))
22652+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
22653 return addr;
22654 }
22655
22656@@ -161,6 +165,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
22657 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
22658 goto bottomup;
22659
22660+#ifdef CONFIG_PAX_RANDMMAP
22661+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
22662+#endif
22663+
22664 /* requesting a specific address */
22665 if (addr) {
22666 addr = PAGE_ALIGN(addr);
22667diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
22668index f84fe00..f41d9f1 100644
22669--- a/arch/x86/kernel/tboot.c
22670+++ b/arch/x86/kernel/tboot.c
22671@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
22672
22673 void tboot_shutdown(u32 shutdown_type)
22674 {
22675- void (*shutdown)(void);
22676+ void (* __noreturn shutdown)(void);
22677
22678 if (!tboot_enabled())
22679 return;
22680@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
22681
22682 switch_to_tboot_pt();
22683
22684- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
22685+ shutdown = (void *)tboot->shutdown_entry;
22686 shutdown();
22687
22688 /* should not reach here */
22689@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
22690 return 0;
22691 }
22692
22693-static atomic_t ap_wfs_count;
22694+static atomic_unchecked_t ap_wfs_count;
22695
22696 static int tboot_wait_for_aps(int num_aps)
22697 {
22698@@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
22699 {
22700 switch (action) {
22701 case CPU_DYING:
22702- atomic_inc(&ap_wfs_count);
22703+ atomic_inc_unchecked(&ap_wfs_count);
22704 if (num_online_cpus() == 1)
22705- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
22706+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
22707 return NOTIFY_BAD;
22708 break;
22709 }
22710 return NOTIFY_OK;
22711 }
22712
22713-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
22714+static struct notifier_block tboot_cpu_notifier =
22715 {
22716 .notifier_call = tboot_cpu_callback,
22717 };
22718@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
22719
22720 tboot_create_trampoline();
22721
22722- atomic_set(&ap_wfs_count, 0);
22723+ atomic_set_unchecked(&ap_wfs_count, 0);
22724 register_hotcpu_notifier(&tboot_cpu_notifier);
22725
22726 acpi_os_set_prepare_sleep(&tboot_sleep);
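
ap_wfs_count above is a plain event counter, not an object reference count, so the patch moves it to atomic_unchecked_t, the escape hatch from PAX_REFCOUNT's overflow trapping. For counters that do guard object lifetimes, the point of the checked flavour is to refuse to wrap; a sketch of that idea using the GCC/Clang overflow builtin rather than the real inline-asm implementation:

#include <stdio.h>
#include <limits.h>

static int checked_inc(int *v)
{
        int next;

        if (__builtin_add_overflow(*v, 1, &next))
                return -1;           /* would wrap: refuse and report */
        *v = next;
        return 0;
}

int main(void)
{
        int v = INT_MAX;

        printf("inc at INT_MAX -> %d\n", checked_inc(&v));
        return 0;
}
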
22727diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
22728index 24d3c91..d06b473 100644
22729--- a/arch/x86/kernel/time.c
22730+++ b/arch/x86/kernel/time.c
22731@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
22732 {
22733 unsigned long pc = instruction_pointer(regs);
22734
22735- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
22736+ if (!user_mode(regs) && in_lock_functions(pc)) {
22737 #ifdef CONFIG_FRAME_POINTER
22738- return *(unsigned long *)(regs->bp + sizeof(long));
22739+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
22740 #else
22741 unsigned long *sp =
22742 (unsigned long *)kernel_stack_pointer(regs);
22743@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
22744 * or above a saved flags. Eflags has bits 22-31 zero,
22745 * kernel addresses don't.
22746 */
22747+
22748+#ifdef CONFIG_PAX_KERNEXEC
22749+ return ktla_ktva(sp[0]);
22750+#else
22751 if (sp[0] >> 22)
22752 return sp[0];
22753 if (sp[1] >> 22)
22754 return sp[1];
22755 #endif
22756+
22757+#endif
22758 }
22759 return pc;
22760 }
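
The stack scan in profile_pc() above tells a saved EFLAGS word from a return address by bits 22-31: they are architecturally reserved-zero in EFLAGS, while i386 kernel text sits at 0xc0000000 and above, where they are never all zero. For instance:

#include <stdio.h>

int main(void)
{
        unsigned long eflags = 0x00000246;   /* typical saved EFLAGS image */
        unsigned long kaddr  = 0xc1050e30;   /* made-up kernel text address */

        printf("eflags>>22=%lu kaddr>>22=%lu\n", eflags >> 22, kaddr >> 22);
        return 0;
}
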
22761diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
22762index 9d9d2f9..cad418a 100644
22763--- a/arch/x86/kernel/tls.c
22764+++ b/arch/x86/kernel/tls.c
22765@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
22766 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
22767 return -EINVAL;
22768
22769+#ifdef CONFIG_PAX_SEGMEXEC
22770+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
22771+ return -EINVAL;
22772+#endif
22773+
22774 set_tls_desc(p, idx, &info, 1);
22775
22776 return 0;
22777@@ -204,7 +209,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
22778
22779 if (kbuf)
22780 info = kbuf;
22781- else if (__copy_from_user(infobuf, ubuf, count))
22782+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
22783 return -EFAULT;
22784 else
22785 info = infobuf;
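
The tls.c fix above is the standard clamp-before-copy pattern: validate the caller-supplied count against the destination buffer before letting __copy_from_user() run, rather than trusting it. A userspace stand-in, with memcpy playing the role of the user-copy helper:

#include <string.h>
#include <errno.h>

static int read_blob(void *dst, size_t dst_size, const void *src, size_t count)
{
        if (count > dst_size)
                return -EFAULT;      /* would overrun the fixed buffer */
        memcpy(dst, src, count);
        return 0;
}

int main(void)
{
        char in[64] = "payload", out[16];

        return read_blob(out, sizeof(out), in, 8); /* ok: 8 <= 16 */
}
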
22786diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
22787index ecffca1..95c4d13 100644
22788--- a/arch/x86/kernel/traps.c
22789+++ b/arch/x86/kernel/traps.c
22790@@ -68,12 +68,6 @@
22791 #include <asm/setup.h>
22792
22793 asmlinkage int system_call(void);
22794-
22795-/*
22796- * The IDT has to be page-aligned to simplify the Pentium
22797- * F0 0F bug workaround.
22798- */
22799-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
22800 #endif
22801
22802 DECLARE_BITMAP(used_vectors, NR_VECTORS);
22803@@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
22804 }
22805
22806 static int __kprobes
22807-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
22808+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
22809 struct pt_regs *regs, long error_code)
22810 {
22811 #ifdef CONFIG_X86_32
22812- if (regs->flags & X86_VM_MASK) {
22813+ if (v8086_mode(regs)) {
22814 /*
22815 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
22816 * On nmi (interrupt 2), do_trap should not be called.
22817@@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
22818 return -1;
22819 }
22820 #endif
22821- if (!user_mode(regs)) {
22822+ if (!user_mode_novm(regs)) {
22823 if (!fixup_exception(regs)) {
22824 tsk->thread.error_code = error_code;
22825 tsk->thread.trap_nr = trapnr;
22826+
22827+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22828+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
22829+ str = "PAX: suspicious stack segment fault";
22830+#endif
22831+
22832 die(str, regs, error_code);
22833 }
22834+
22835+#ifdef CONFIG_PAX_REFCOUNT
22836+ if (trapnr == 4)
22837+ pax_report_refcount_overflow(regs);
22838+#endif
22839+
22840 return 0;
22841 }
22842
22843@@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
22844 }
22845
22846 static void __kprobes
22847-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
22848+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
22849 long error_code, siginfo_t *info)
22850 {
22851 struct task_struct *tsk = current;
22852@@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
22853 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
22854 printk_ratelimit()) {
22855 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
22856- tsk->comm, tsk->pid, str,
22857+ tsk->comm, task_pid_nr(tsk), str,
22858 regs->ip, regs->sp, error_code);
22859 print_vma_addr(" in ", regs->ip);
22860 pr_cont("\n");
22861@@ -266,7 +272,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
22862 conditional_sti(regs);
22863
22864 #ifdef CONFIG_X86_32
22865- if (regs->flags & X86_VM_MASK) {
22866+ if (v8086_mode(regs)) {
22867 local_irq_enable();
22868 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
22869 goto exit;
22870@@ -274,18 +280,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
22871 #endif
22872
22873 tsk = current;
22874- if (!user_mode(regs)) {
22875+ if (!user_mode_novm(regs)) {
22876 if (fixup_exception(regs))
22877 goto exit;
22878
22879 tsk->thread.error_code = error_code;
22880 tsk->thread.trap_nr = X86_TRAP_GP;
22881 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
22882- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
22883+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
22884+
22885+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
22886+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
22887+ die("PAX: suspicious general protection fault", regs, error_code);
22888+ else
22889+#endif
22890+
22891 die("general protection fault", regs, error_code);
22892+ }
22893 goto exit;
22894 }
22895
22896+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
22897+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
22898+ struct mm_struct *mm = tsk->mm;
22899+ unsigned long limit;
22900+
22901+ down_write(&mm->mmap_sem);
22902+ limit = mm->context.user_cs_limit;
22903+ if (limit < TASK_SIZE) {
22904+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
22905+ up_write(&mm->mmap_sem);
22906+ return;
22907+ }
22908+ up_write(&mm->mmap_sem);
22909+ }
22910+#endif
22911+
22912 tsk->thread.error_code = error_code;
22913 tsk->thread.trap_nr = X86_TRAP_GP;
22914
22915@@ -440,7 +470,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
22916 /* It's safe to allow irq's after DR6 has been saved */
22917 preempt_conditional_sti(regs);
22918
22919- if (regs->flags & X86_VM_MASK) {
22920+ if (v8086_mode(regs)) {
22921 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
22922 X86_TRAP_DB);
22923 preempt_conditional_cli(regs);
22924@@ -455,7 +485,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
22925 * We already checked v86 mode above, so we can check for kernel mode
22926 * by just checking the CPL of CS.
22927 */
22928- if ((dr6 & DR_STEP) && !user_mode(regs)) {
22929+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
22930 tsk->thread.debugreg6 &= ~DR_STEP;
22931 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
22932 regs->flags &= ~X86_EFLAGS_TF;
22933@@ -487,7 +517,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
22934 return;
22935 conditional_sti(regs);
22936
22937- if (!user_mode_vm(regs))
22938+ if (!user_mode(regs))
22939 {
22940 if (!fixup_exception(regs)) {
22941 task->thread.error_code = error_code;
22942diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
22943index c71025b..b117501 100644
22944--- a/arch/x86/kernel/uprobes.c
22945+++ b/arch/x86/kernel/uprobes.c
22946@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
22947 int ret = NOTIFY_DONE;
22948
22949 /* We are only interested in userspace traps */
22950- if (regs && !user_mode_vm(regs))
22951+ if (regs && !user_mode(regs))
22952 return NOTIFY_DONE;
22953
22954 switch (val) {
22955diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
22956index b9242ba..50c5edd 100644
22957--- a/arch/x86/kernel/verify_cpu.S
22958+++ b/arch/x86/kernel/verify_cpu.S
22959@@ -20,6 +20,7 @@
22960 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
22961 * arch/x86/kernel/trampoline_64.S: secondary processor verification
22962 * arch/x86/kernel/head_32.S: processor startup
22963+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
22964 *
22965 * verify_cpu, returns the status of longmode and SSE in register %eax.
22966 * 0: Success 1: Failure
22967diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
22968index 1dfe69c..a3df6f6 100644
22969--- a/arch/x86/kernel/vm86_32.c
22970+++ b/arch/x86/kernel/vm86_32.c
22971@@ -43,6 +43,7 @@
22972 #include <linux/ptrace.h>
22973 #include <linux/audit.h>
22974 #include <linux/stddef.h>
22975+#include <linux/grsecurity.h>
22976
22977 #include <asm/uaccess.h>
22978 #include <asm/io.h>
22979@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
22980 do_exit(SIGSEGV);
22981 }
22982
22983- tss = &per_cpu(init_tss, get_cpu());
22984+ tss = init_tss + get_cpu();
22985 current->thread.sp0 = current->thread.saved_sp0;
22986 current->thread.sysenter_cs = __KERNEL_CS;
22987 load_sp0(tss, &current->thread);
22988@@ -212,6 +213,13 @@ int sys_vm86old(struct vm86_struct __user *v86, struct pt_regs *regs)
22989 struct task_struct *tsk;
22990 int tmp, ret = -EPERM;
22991
22992+#ifdef CONFIG_GRKERNSEC_VM86
22993+ if (!capable(CAP_SYS_RAWIO)) {
22994+ gr_handle_vm86();
22995+ goto out;
22996+ }
22997+#endif
22998+
22999 tsk = current;
23000 if (tsk->thread.saved_sp0)
23001 goto out;
23002@@ -242,6 +250,14 @@ int sys_vm86(unsigned long cmd, unsigned long arg, struct pt_regs *regs)
23003 int tmp, ret;
23004 struct vm86plus_struct __user *v86;
23005
23006+#ifdef CONFIG_GRKERNSEC_VM86
23007+ if (!capable(CAP_SYS_RAWIO)) {
23008+ gr_handle_vm86();
23009+ ret = -EPERM;
23010+ goto out;
23011+ }
23012+#endif
23013+
23014 tsk = current;
23015 switch (cmd) {
23016 case VM86_REQUEST_IRQ:
23017@@ -328,7 +344,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
23018 tsk->thread.saved_fs = info->regs32->fs;
23019 tsk->thread.saved_gs = get_user_gs(info->regs32);
23020
23021- tss = &per_cpu(init_tss, get_cpu());
23022+ tss = init_tss + get_cpu();
23023 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
23024 if (cpu_has_sep)
23025 tsk->thread.sysenter_cs = 0;
23026@@ -535,7 +551,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
23027 goto cannot_handle;
23028 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
23029 goto cannot_handle;
23030- intr_ptr = (unsigned long __user *) (i << 2);
23031+ intr_ptr = (__force unsigned long __user *) (i << 2);
23032 if (get_user(segoffs, intr_ptr))
23033 goto cannot_handle;
23034 if ((segoffs >> 16) == BIOSSEG)
23035diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
23036index 22a1530..8fbaaad 100644
23037--- a/arch/x86/kernel/vmlinux.lds.S
23038+++ b/arch/x86/kernel/vmlinux.lds.S
23039@@ -26,6 +26,13 @@
23040 #include <asm/page_types.h>
23041 #include <asm/cache.h>
23042 #include <asm/boot.h>
23043+#include <asm/segment.h>
23044+
23045+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23046+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
23047+#else
23048+#define __KERNEL_TEXT_OFFSET 0
23049+#endif
23050
23051 #undef i386 /* in case the preprocessor is a 32bit one */
23052
23053@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
23054
23055 PHDRS {
23056 text PT_LOAD FLAGS(5); /* R_E */
23057+#ifdef CONFIG_X86_32
23058+ module PT_LOAD FLAGS(5); /* R_E */
23059+#endif
23060+#ifdef CONFIG_XEN
23061+ rodata PT_LOAD FLAGS(5); /* R_E */
23062+#else
23063+ rodata PT_LOAD FLAGS(4); /* R__ */
23064+#endif
23065 data PT_LOAD FLAGS(6); /* RW_ */
23066-#ifdef CONFIG_X86_64
23067+ init.begin PT_LOAD FLAGS(6); /* RW_ */
23068 #ifdef CONFIG_SMP
23069 percpu PT_LOAD FLAGS(6); /* RW_ */
23070 #endif
23071+ text.init PT_LOAD FLAGS(5); /* R_E */
23072+ text.exit PT_LOAD FLAGS(5); /* R_E */
23073 init PT_LOAD FLAGS(7); /* RWE */
23074-#endif
23075 note PT_NOTE FLAGS(0); /* ___ */
23076 }
23077
23078 SECTIONS
23079 {
23080 #ifdef CONFIG_X86_32
23081- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
23082- phys_startup_32 = startup_32 - LOAD_OFFSET;
23083+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
23084 #else
23085- . = __START_KERNEL;
23086- phys_startup_64 = startup_64 - LOAD_OFFSET;
23087+ . = __START_KERNEL;
23088 #endif
23089
23090 /* Text and read-only data */
23091- .text : AT(ADDR(.text) - LOAD_OFFSET) {
23092- _text = .;
23093+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23094 /* bootstrapping code */
23095+#ifdef CONFIG_X86_32
23096+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23097+#else
23098+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23099+#endif
23100+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23101+ _text = .;
23102 HEAD_TEXT
23103 #ifdef CONFIG_X86_32
23104 . = ALIGN(PAGE_SIZE);
23105@@ -108,13 +128,48 @@ SECTIONS
23106 IRQENTRY_TEXT
23107 *(.fixup)
23108 *(.gnu.warning)
23109- /* End of text section */
23110- _etext = .;
23111 } :text = 0x9090
23112
23113- NOTES :text :note
23114+ . += __KERNEL_TEXT_OFFSET;
23115
23116- EXCEPTION_TABLE(16) :text = 0x9090
23117+#ifdef CONFIG_X86_32
23118+ . = ALIGN(PAGE_SIZE);
23119+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
23120+
23121+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
23122+ MODULES_EXEC_VADDR = .;
23123+ BYTE(0)
23124+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
23125+ . = ALIGN(HPAGE_SIZE) - 1;
23126+ MODULES_EXEC_END = .;
23127+#endif
23128+
23129+ } :module
23130+#endif
23131+
23132+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
23133+ /* End of text section */
23134+ BYTE(0)
23135+ _etext = . - __KERNEL_TEXT_OFFSET;
23136+ }
23137+
23138+#ifdef CONFIG_X86_32
23139+ . = ALIGN(PAGE_SIZE);
23140+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
23141+ *(.idt)
23142+ . = ALIGN(PAGE_SIZE);
23143+ *(.empty_zero_page)
23144+ *(.initial_pg_fixmap)
23145+ *(.initial_pg_pmd)
23146+ *(.initial_page_table)
23147+ *(.swapper_pg_dir)
23148+ } :rodata
23149+#endif
23150+
23151+ . = ALIGN(PAGE_SIZE);
23152+ NOTES :rodata :note
23153+
23154+ EXCEPTION_TABLE(16) :rodata
23155
23156 #if defined(CONFIG_DEBUG_RODATA)
23157 /* .text should occupy whole number of pages */
23158@@ -126,16 +181,20 @@ SECTIONS
23159
23160 /* Data */
23161 .data : AT(ADDR(.data) - LOAD_OFFSET) {
23162+
23163+#ifdef CONFIG_PAX_KERNEXEC
23164+ . = ALIGN(HPAGE_SIZE);
23165+#else
23166+ . = ALIGN(PAGE_SIZE);
23167+#endif
23168+
23169 /* Start of data section */
23170 _sdata = .;
23171
23172 /* init_task */
23173 INIT_TASK_DATA(THREAD_SIZE)
23174
23175-#ifdef CONFIG_X86_32
23176- /* 32 bit has nosave before _edata */
23177 NOSAVE_DATA
23178-#endif
23179
23180 PAGE_ALIGNED_DATA(PAGE_SIZE)
23181
23182@@ -176,12 +235,19 @@ SECTIONS
23183 #endif /* CONFIG_X86_64 */
23184
23185 /* Init code and data - will be freed after init */
23186- . = ALIGN(PAGE_SIZE);
23187 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
23188+ BYTE(0)
23189+
23190+#ifdef CONFIG_PAX_KERNEXEC
23191+ . = ALIGN(HPAGE_SIZE);
23192+#else
23193+ . = ALIGN(PAGE_SIZE);
23194+#endif
23195+
23196 __init_begin = .; /* paired with __init_end */
23197- }
23198+ } :init.begin
23199
23200-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
23201+#ifdef CONFIG_SMP
23202 /*
23203 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
23204 * output PHDR, so the next output section - .init.text - should
23205@@ -190,12 +256,27 @@ SECTIONS
23206 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
23207 #endif
23208
23209- INIT_TEXT_SECTION(PAGE_SIZE)
23210-#ifdef CONFIG_X86_64
23211- :init
23212-#endif
23213+ . = ALIGN(PAGE_SIZE);
23214+ init_begin = .;
23215+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
23216+ VMLINUX_SYMBOL(_sinittext) = .;
23217+ INIT_TEXT
23218+ VMLINUX_SYMBOL(_einittext) = .;
23219+ . = ALIGN(PAGE_SIZE);
23220+ } :text.init
23221
23222- INIT_DATA_SECTION(16)
23223+ /*
23224+ * .exit.text is discard at runtime, not link time, to deal with
23225+ * references from .altinstructions and .eh_frame
23226+ */
23227+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23228+ EXIT_TEXT
23229+ . = ALIGN(16);
23230+ } :text.exit
23231+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
23232+
23233+ . = ALIGN(PAGE_SIZE);
23234+ INIT_DATA_SECTION(16) :init
23235
23236 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
23237 __x86_cpu_dev_start = .;
23238@@ -257,19 +338,12 @@ SECTIONS
23239 }
23240
23241 . = ALIGN(8);
23242- /*
23243- * .exit.text is discard at runtime, not link time, to deal with
23244- * references from .altinstructions and .eh_frame
23245- */
23246- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
23247- EXIT_TEXT
23248- }
23249
23250 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
23251 EXIT_DATA
23252 }
23253
23254-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
23255+#ifndef CONFIG_SMP
23256 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
23257 #endif
23258
23259@@ -288,16 +362,10 @@ SECTIONS
23260 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
23261 __smp_locks = .;
23262 *(.smp_locks)
23263- . = ALIGN(PAGE_SIZE);
23264 __smp_locks_end = .;
23265+ . = ALIGN(PAGE_SIZE);
23266 }
23267
23268-#ifdef CONFIG_X86_64
23269- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
23270- NOSAVE_DATA
23271- }
23272-#endif
23273-
23274 /* BSS */
23275 . = ALIGN(PAGE_SIZE);
23276 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
23277@@ -313,6 +381,7 @@ SECTIONS
23278 __brk_base = .;
23279 . += 64 * 1024; /* 64k alignment slop space */
23280 *(.brk_reservation) /* areas brk users have reserved */
23281+ . = ALIGN(HPAGE_SIZE);
23282 __brk_limit = .;
23283 }
23284
23285@@ -339,13 +408,12 @@ SECTIONS
23286 * for the boot processor.
23287 */
23288 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
23289-INIT_PER_CPU(gdt_page);
23290 INIT_PER_CPU(irq_stack_union);
23291
23292 /*
23293 * Build-time check on the image size:
23294 */
23295-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
23296+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
23297 "kernel image bigger than KERNEL_IMAGE_SIZE");
23298
23299 #ifdef CONFIG_SMP
23300diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
23301index 9a907a6..f83f921 100644
23302--- a/arch/x86/kernel/vsyscall_64.c
23303+++ b/arch/x86/kernel/vsyscall_64.c
23304@@ -56,15 +56,13 @@
23305 DEFINE_VVAR(int, vgetcpu_mode);
23306 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
23307
23308-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
23309+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
23310
23311 static int __init vsyscall_setup(char *str)
23312 {
23313 if (str) {
23314 if (!strcmp("emulate", str))
23315 vsyscall_mode = EMULATE;
23316- else if (!strcmp("native", str))
23317- vsyscall_mode = NATIVE;
23318 else if (!strcmp("none", str))
23319 vsyscall_mode = NONE;
23320 else
23321@@ -323,8 +321,7 @@ do_ret:
23322 return true;
23323
23324 sigsegv:
23325- force_sig(SIGSEGV, current);
23326- return true;
23327+ do_group_exit(SIGKILL);
23328 }
23329
23330 /*
23331@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
23332 extern char __vvar_page;
23333 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
23334
23335- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
23336- vsyscall_mode == NATIVE
23337- ? PAGE_KERNEL_VSYSCALL
23338- : PAGE_KERNEL_VVAR);
23339+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
23340 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
23341 (unsigned long)VSYSCALL_START);
23342
23343diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
23344index 1330dd1..d220b99 100644
23345--- a/arch/x86/kernel/x8664_ksyms_64.c
23346+++ b/arch/x86/kernel/x8664_ksyms_64.c
23347@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
23348 EXPORT_SYMBOL(copy_user_generic_unrolled);
23349 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
23350 EXPORT_SYMBOL(__copy_user_nocache);
23351-EXPORT_SYMBOL(_copy_from_user);
23352-EXPORT_SYMBOL(_copy_to_user);
23353
23354 EXPORT_SYMBOL(copy_page);
23355 EXPORT_SYMBOL(clear_page);
23356diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
23357index 7a3d075..6cb373d 100644
23358--- a/arch/x86/kernel/x86_init.c
23359+++ b/arch/x86/kernel/x86_init.c
23360@@ -88,7 +88,7 @@ struct x86_init_ops x86_init __initdata = {
23361 },
23362 };
23363
23364-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
23365+struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
23366 .early_percpu_clock_init = x86_init_noop,
23367 .setup_percpu_clockev = setup_secondary_APIC_clock,
23368 };
23369@@ -96,7 +96,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
23370 static void default_nmi_init(void) { };
23371 static int default_i8042_detect(void) { return 1; };
23372
23373-struct x86_platform_ops x86_platform = {
23374+struct x86_platform_ops x86_platform __read_only = {
23375 .calibrate_tsc = native_calibrate_tsc,
23376 .get_wallclock = mach_get_cmos_time,
23377 .set_wallclock = mach_set_rtc_mmss,
23378@@ -110,14 +110,14 @@ struct x86_platform_ops x86_platform = {
23379 };
23380
23381 EXPORT_SYMBOL_GPL(x86_platform);
23382-struct x86_msi_ops x86_msi = {
23383+struct x86_msi_ops x86_msi __read_only = {
23384 .setup_msi_irqs = native_setup_msi_irqs,
23385 .teardown_msi_irq = native_teardown_msi_irq,
23386 .teardown_msi_irqs = default_teardown_msi_irqs,
23387 .restore_msi_irqs = default_restore_msi_irqs,
23388 };
23389
23390-struct x86_io_apic_ops x86_io_apic_ops = {
23391+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
23392 .init = native_io_apic_init_mappings,
23393 .read = native_io_apic_read,
23394 .write = native_io_apic_write,
23395diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
23396index ada87a3..afea76d 100644
23397--- a/arch/x86/kernel/xsave.c
23398+++ b/arch/x86/kernel/xsave.c
23399@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
23400 {
23401 int err;
23402
23403+ buf = (struct xsave_struct __user *)____m(buf);
23404 if (use_xsave())
23405 err = xsave_user(buf);
23406 else if (use_fxsr())
23407@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
23408 */
23409 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
23410 {
23411+ buf = (void __user *)____m(buf);
23412 if (use_xsave()) {
23413 if ((unsigned long)buf % 64 || fx_only) {
23414 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
23415diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
23416index a20ecb5..d0e2194 100644
23417--- a/arch/x86/kvm/cpuid.c
23418+++ b/arch/x86/kvm/cpuid.c
23419@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
23420 struct kvm_cpuid2 *cpuid,
23421 struct kvm_cpuid_entry2 __user *entries)
23422 {
23423- int r;
23424+ int r, i;
23425
23426 r = -E2BIG;
23427 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
23428 goto out;
23429 r = -EFAULT;
23430- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
23431- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
23432+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
23433 goto out;
23434+ for (i = 0; i < cpuid->nent; ++i) {
23435+ struct kvm_cpuid_entry2 cpuid_entry;
23436+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
23437+ goto out;
23438+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
23439+ }
23440 vcpu->arch.cpuid_nent = cpuid->nent;
23441 kvm_apic_set_version(vcpu);
23442 kvm_x86_ops->cpuid_update(vcpu);
23443@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
23444 struct kvm_cpuid2 *cpuid,
23445 struct kvm_cpuid_entry2 __user *entries)
23446 {
23447- int r;
23448+ int r, i;
23449
23450 r = -E2BIG;
23451 if (cpuid->nent < vcpu->arch.cpuid_nent)
23452 goto out;
23453 r = -EFAULT;
23454- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
23455- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
23456+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
23457 goto out;
23458+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
23459+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
23460+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
23461+ goto out;
23462+ }
23463 return 0;
23464
23465 out:
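The rewritten cpuid ioctls trade one large copy_from_user()/copy_to_user() for a single access_ok() range check followed by a bounded per-entry loop: each __copy_from_user() moves exactly sizeof(struct kvm_cpuid_entry2) bytes into a stack temporary before it lands in the fixed-size array in vcpu->arch. A condensed sketch of the input side, with hypothetical names (dst, uentries, nent):

    static int copy_entries_in(struct kvm_cpuid_entry2 *dst,
                               const struct kvm_cpuid_entry2 __user *uentries,
                               unsigned int nent)
    {
            unsigned int i;

            /* validate the whole user range once... */
            if (!access_ok(VERIFY_READ, uentries, nent * sizeof(*uentries)))
                    return -EFAULT;
            /* ...then copy in fixed-size, bounded steps */
            for (i = 0; i < nent; i++) {
                    struct kvm_cpuid_entry2 tmp;

                    if (__copy_from_user(&tmp, uentries + i, sizeof(tmp)))
                            return -EFAULT;
                    dst[i] = tmp;
            }
            return 0;
    }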
23466diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
23467index a27e763..54bfe43 100644
23468--- a/arch/x86/kvm/emulate.c
23469+++ b/arch/x86/kvm/emulate.c
23470@@ -292,6 +292,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
23471
23472 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
23473 do { \
23474+ unsigned long _tmp; \
23475 __asm__ __volatile__ ( \
23476 _PRE_EFLAGS("0", "4", "2") \
23477 _op _suffix " %"_x"3,%1; " \
23478@@ -306,8 +307,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
23479 /* Raw emulation: instruction has two explicit operands. */
23480 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
23481 do { \
23482- unsigned long _tmp; \
23483- \
23484 switch ((ctxt)->dst.bytes) { \
23485 case 2: \
23486 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
23487@@ -323,7 +322,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
23488
23489 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
23490 do { \
23491- unsigned long _tmp; \
23492 switch ((ctxt)->dst.bytes) { \
23493 case 1: \
23494 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
23495diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
23496index 9392f52..0e56d77 100644
23497--- a/arch/x86/kvm/lapic.c
23498+++ b/arch/x86/kvm/lapic.c
23499@@ -55,7 +55,7 @@
23500 #define APIC_BUS_CYCLE_NS 1
23501
23502 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
23503-#define apic_debug(fmt, arg...)
23504+#define apic_debug(fmt, arg...) do {} while (0)
23505
23506 #define APIC_LVT_NUM 6
23507 /* 14 is the version for Xeon and Pentium 8.4.8*/
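Redefining the disabled apic_debug() as do {} while (0) rather than an empty expansion turns it into a proper single statement, so a call site compiles the same way whether or not the debug body exists and avoids -Wempty-body style warnings. A small usage sketch:

    #define apic_debug(fmt, arg...) do {} while (0)

    static void report_state(int enabled)
    {
            /* both branches remain well-formed statements */
            if (enabled)
                    apic_debug("APIC enabled\n");
            else
                    apic_debug("APIC disabled\n");
    }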
23508diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
23509index 891eb6d..e027900 100644
23510--- a/arch/x86/kvm/paging_tmpl.h
23511+++ b/arch/x86/kvm/paging_tmpl.h
23512@@ -208,7 +208,7 @@ retry_walk:
23513 if (unlikely(kvm_is_error_hva(host_addr)))
23514 goto error;
23515
23516- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
23517+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
23518 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
23519 goto error;
23520 walker->ptep_user[walker->level - 1] = ptep_user;
23521diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
23522index d29d3cd..ec9d522 100644
23523--- a/arch/x86/kvm/svm.c
23524+++ b/arch/x86/kvm/svm.c
23525@@ -3507,7 +3507,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
23526 int cpu = raw_smp_processor_id();
23527
23528 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
23529+
23530+ pax_open_kernel();
23531 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
23532+ pax_close_kernel();
23533+
23534 load_TR_desc();
23535 }
23536
23537@@ -3881,6 +3885,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
23538 #endif
23539 #endif
23540
23541+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
23542+ __set_fs(current_thread_info()->addr_limit);
23543+#endif
23544+
23545 reload_tss(vcpu);
23546
23547 local_irq_disable();
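reload_tss() must flip the TSS descriptor back to "available" before ltr, and under KERNEXEC the GDT it writes to is mapped read-only, hence the pax_open_kernel()/pax_close_kernel() bracket; the companion hunk in svm_vcpu_run() restores the UDEREF address limit after VMRUN because the hardware world switch clobbers the segment state it is tracked in. A deliberately simplified sketch of what such a bracket can do on x86 (the real PaX pair takes no arguments and handles per-CPU state more carefully; only the CR0.WP idea is shown):

    static inline unsigned long open_kernel_sketch(void)
    {
            unsigned long cr0;

            preempt_disable();
            cr0 = read_cr0();
            write_cr0(cr0 & ~X86_CR0_WP);   /* allow ring-0 writes to r/o pages */
            return cr0;
    }

    static inline void close_kernel_sketch(unsigned long cr0)
    {
            write_cr0(cr0);                 /* restore write protection */
            preempt_enable();
    }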
23548diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
23549index 9120ae1..238abc0 100644
23550--- a/arch/x86/kvm/vmx.c
23551+++ b/arch/x86/kvm/vmx.c
23552@@ -1370,7 +1370,11 @@ static void reload_tss(void)
23553 struct desc_struct *descs;
23554
23555 descs = (void *)gdt->address;
23556+
23557+ pax_open_kernel();
23558 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
23559+ pax_close_kernel();
23560+
23561 load_TR_desc();
23562 }
23563
23564@@ -1594,6 +1598,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
23565 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
23566 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
23567
23568+#ifdef CONFIG_PAX_PER_CPU_PGD
23569+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
23570+#endif
23571+
23572 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
23573 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
23574 vmx->loaded_vmcs->cpu = cpu;
23575@@ -2738,8 +2746,11 @@ static __init int hardware_setup(void)
23576 if (!cpu_has_vmx_flexpriority())
23577 flexpriority_enabled = 0;
23578
23579- if (!cpu_has_vmx_tpr_shadow())
23580- kvm_x86_ops->update_cr8_intercept = NULL;
23581+ if (!cpu_has_vmx_tpr_shadow()) {
23582+ pax_open_kernel();
23583+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
23584+ pax_close_kernel();
23585+ }
23586
23587 if (enable_ept && !cpu_has_vmx_ept_2m_page())
23588 kvm_disable_largepages();
23589@@ -3782,7 +3793,10 @@ static void vmx_set_constant_host_state(void)
23590
23591 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
23592 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
23593+
23594+#ifndef CONFIG_PAX_PER_CPU_PGD
23595 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
23596+#endif
23597
23598 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
23599 #ifdef CONFIG_X86_64
23600@@ -3803,7 +3817,7 @@ static void vmx_set_constant_host_state(void)
23601 native_store_idt(&dt);
23602 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
23603
23604- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
23605+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
23606
23607 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
23608 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
23609@@ -6355,6 +6369,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
23610 "jmp 2f \n\t"
23611 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
23612 "2: "
23613+
23614+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23615+ "ljmp %[cs],$3f\n\t"
23616+ "3: "
23617+#endif
23618+
23619 /* Save guest registers, load host registers, keep flags */
23620 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
23621 "pop %0 \n\t"
23622@@ -6407,6 +6427,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
23623 #endif
23624 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
23625 [wordsize]"i"(sizeof(ulong))
23626+
23627+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23628+ ,[cs]"i"(__KERNEL_CS)
23629+#endif
23630+
23631 : "cc", "memory"
23632 #ifdef CONFIG_X86_64
23633 , "rax", "rbx", "rdi", "rsi"
23634@@ -6420,7 +6445,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
23635 if (debugctlmsr)
23636 update_debugctlmsr(debugctlmsr);
23637
23638-#ifndef CONFIG_X86_64
23639+#ifdef CONFIG_X86_32
23640 /*
23641 * The sysexit path does not restore ds/es, so we must set them to
23642 * a reasonable value ourselves.
23643@@ -6429,8 +6454,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
23644 * may be executed in interrupt context, which saves and restore segments
23645 * around it, nullifying its effect.
23646 */
23647- loadsegment(ds, __USER_DS);
23648- loadsegment(es, __USER_DS);
23649+ loadsegment(ds, __KERNEL_DS);
23650+ loadsegment(es, __KERNEL_DS);
23651+ loadsegment(ss, __KERNEL_DS);
23652+
23653+#ifdef CONFIG_PAX_KERNEXEC
23654+ loadsegment(fs, __KERNEL_PERCPU);
23655+#endif
23656+
23657+#ifdef CONFIG_PAX_MEMORY_UDEREF
23658+ __set_fs(current_thread_info()->addr_limit);
23659+#endif
23660+
23661 #endif
23662
23663 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
23664diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
23665index c243b81..9eb193f 100644
23666--- a/arch/x86/kvm/x86.c
23667+++ b/arch/x86/kvm/x86.c
23668@@ -1692,8 +1692,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
23669 {
23670 struct kvm *kvm = vcpu->kvm;
23671 int lm = is_long_mode(vcpu);
23672- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
23673- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
23674+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
23675+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
23676 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
23677 : kvm->arch.xen_hvm_config.blob_size_32;
23678 u32 page_num = data & ~PAGE_MASK;
23679@@ -2571,6 +2571,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
23680 if (n < msr_list.nmsrs)
23681 goto out;
23682 r = -EFAULT;
23683+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
23684+ goto out;
23685 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
23686 num_msrs_to_save * sizeof(u32)))
23687 goto out;
23688@@ -2700,7 +2702,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
23689 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
23690 struct kvm_interrupt *irq)
23691 {
23692- if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
23693+ if (irq->irq >= KVM_NR_INTERRUPTS)
23694 return -EINVAL;
23695 if (irqchip_in_kernel(vcpu->kvm))
23696 return -ENXIO;
23697@@ -5213,7 +5215,7 @@ static struct notifier_block pvclock_gtod_notifier = {
23698 };
23699 #endif
23700
23701-int kvm_arch_init(void *opaque)
23702+int kvm_arch_init(const void *opaque)
23703 {
23704 int r;
23705 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
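Two of the x86.c hunks are small bounds fixes: kvm_arch_dev_ioctl() now refuses to copy out more MSR indices than msrs_to_save actually holds, and kvm_vcpu_ioctl_interrupt() drops a dead comparison. Since kvm_interrupt.irq is a __u32 in the uapi header, "irq->irq < 0" could never be true, and the single unsigned test covers both directions:

    static int validate_irq_sketch(u32 irq)
    {
            /* unsigned compare: values that would look negative as s32
             * are just very large here, so one test rejects them too */
            if (irq >= KVM_NR_INTERRUPTS)
                    return -EINVAL;
            return 0;
    }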
23706diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
23707index df4176c..23ce092 100644
23708--- a/arch/x86/lguest/boot.c
23709+++ b/arch/x86/lguest/boot.c
23710@@ -1200,9 +1200,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
23711 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
23712 * Launcher to reboot us.
23713 */
23714-static void lguest_restart(char *reason)
23715+static __noreturn void lguest_restart(char *reason)
23716 {
23717 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
23718+ BUG();
23719 }
23720
23721 /*G:050
23722diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
23723index 00933d5..3a64af9 100644
23724--- a/arch/x86/lib/atomic64_386_32.S
23725+++ b/arch/x86/lib/atomic64_386_32.S
23726@@ -48,6 +48,10 @@ BEGIN(read)
23727 movl (v), %eax
23728 movl 4(v), %edx
23729 RET_ENDP
23730+BEGIN(read_unchecked)
23731+ movl (v), %eax
23732+ movl 4(v), %edx
23733+RET_ENDP
23734 #undef v
23735
23736 #define v %esi
23737@@ -55,6 +59,10 @@ BEGIN(set)
23738 movl %ebx, (v)
23739 movl %ecx, 4(v)
23740 RET_ENDP
23741+BEGIN(set_unchecked)
23742+ movl %ebx, (v)
23743+ movl %ecx, 4(v)
23744+RET_ENDP
23745 #undef v
23746
23747 #define v %esi
23748@@ -70,6 +78,20 @@ RET_ENDP
23749 BEGIN(add)
23750 addl %eax, (v)
23751 adcl %edx, 4(v)
23752+
23753+#ifdef CONFIG_PAX_REFCOUNT
23754+ jno 0f
23755+ subl %eax, (v)
23756+ sbbl %edx, 4(v)
23757+ int $4
23758+0:
23759+ _ASM_EXTABLE(0b, 0b)
23760+#endif
23761+
23762+RET_ENDP
23763+BEGIN(add_unchecked)
23764+ addl %eax, (v)
23765+ adcl %edx, 4(v)
23766 RET_ENDP
23767 #undef v
23768
23769@@ -77,6 +99,24 @@ RET_ENDP
23770 BEGIN(add_return)
23771 addl (v), %eax
23772 adcl 4(v), %edx
23773+
23774+#ifdef CONFIG_PAX_REFCOUNT
23775+ into
23776+1234:
23777+ _ASM_EXTABLE(1234b, 2f)
23778+#endif
23779+
23780+ movl %eax, (v)
23781+ movl %edx, 4(v)
23782+
23783+#ifdef CONFIG_PAX_REFCOUNT
23784+2:
23785+#endif
23786+
23787+RET_ENDP
23788+BEGIN(add_return_unchecked)
23789+ addl (v), %eax
23790+ adcl 4(v), %edx
23791 movl %eax, (v)
23792 movl %edx, 4(v)
23793 RET_ENDP
23794@@ -86,6 +126,20 @@ RET_ENDP
23795 BEGIN(sub)
23796 subl %eax, (v)
23797 sbbl %edx, 4(v)
23798+
23799+#ifdef CONFIG_PAX_REFCOUNT
23800+ jno 0f
23801+ addl %eax, (v)
23802+ adcl %edx, 4(v)
23803+ int $4
23804+0:
23805+ _ASM_EXTABLE(0b, 0b)
23806+#endif
23807+
23808+RET_ENDP
23809+BEGIN(sub_unchecked)
23810+ subl %eax, (v)
23811+ sbbl %edx, 4(v)
23812 RET_ENDP
23813 #undef v
23814
23815@@ -96,6 +150,27 @@ BEGIN(sub_return)
23816 sbbl $0, %edx
23817 addl (v), %eax
23818 adcl 4(v), %edx
23819+
23820+#ifdef CONFIG_PAX_REFCOUNT
23821+ into
23822+1234:
23823+ _ASM_EXTABLE(1234b, 2f)
23824+#endif
23825+
23826+ movl %eax, (v)
23827+ movl %edx, 4(v)
23828+
23829+#ifdef CONFIG_PAX_REFCOUNT
23830+2:
23831+#endif
23832+
23833+RET_ENDP
23834+BEGIN(sub_return_unchecked)
23835+ negl %edx
23836+ negl %eax
23837+ sbbl $0, %edx
23838+ addl (v), %eax
23839+ adcl 4(v), %edx
23840 movl %eax, (v)
23841 movl %edx, 4(v)
23842 RET_ENDP
23843@@ -105,6 +180,20 @@ RET_ENDP
23844 BEGIN(inc)
23845 addl $1, (v)
23846 adcl $0, 4(v)
23847+
23848+#ifdef CONFIG_PAX_REFCOUNT
23849+ jno 0f
23850+ subl $1, (v)
23851+ sbbl $0, 4(v)
23852+ int $4
23853+0:
23854+ _ASM_EXTABLE(0b, 0b)
23855+#endif
23856+
23857+RET_ENDP
23858+BEGIN(inc_unchecked)
23859+ addl $1, (v)
23860+ adcl $0, 4(v)
23861 RET_ENDP
23862 #undef v
23863
23864@@ -114,6 +203,26 @@ BEGIN(inc_return)
23865 movl 4(v), %edx
23866 addl $1, %eax
23867 adcl $0, %edx
23868+
23869+#ifdef CONFIG_PAX_REFCOUNT
23870+ into
23871+1234:
23872+ _ASM_EXTABLE(1234b, 2f)
23873+#endif
23874+
23875+ movl %eax, (v)
23876+ movl %edx, 4(v)
23877+
23878+#ifdef CONFIG_PAX_REFCOUNT
23879+2:
23880+#endif
23881+
23882+RET_ENDP
23883+BEGIN(inc_return_unchecked)
23884+ movl (v), %eax
23885+ movl 4(v), %edx
23886+ addl $1, %eax
23887+ adcl $0, %edx
23888 movl %eax, (v)
23889 movl %edx, 4(v)
23890 RET_ENDP
23891@@ -123,6 +232,20 @@ RET_ENDP
23892 BEGIN(dec)
23893 subl $1, (v)
23894 sbbl $0, 4(v)
23895+
23896+#ifdef CONFIG_PAX_REFCOUNT
23897+ jno 0f
23898+ addl $1, (v)
23899+ adcl $0, 4(v)
23900+ int $4
23901+0:
23902+ _ASM_EXTABLE(0b, 0b)
23903+#endif
23904+
23905+RET_ENDP
23906+BEGIN(dec_unchecked)
23907+ subl $1, (v)
23908+ sbbl $0, 4(v)
23909 RET_ENDP
23910 #undef v
23911
23912@@ -132,6 +255,26 @@ BEGIN(dec_return)
23913 movl 4(v), %edx
23914 subl $1, %eax
23915 sbbl $0, %edx
23916+
23917+#ifdef CONFIG_PAX_REFCOUNT
23918+ into
23919+1234:
23920+ _ASM_EXTABLE(1234b, 2f)
23921+#endif
23922+
23923+ movl %eax, (v)
23924+ movl %edx, 4(v)
23925+
23926+#ifdef CONFIG_PAX_REFCOUNT
23927+2:
23928+#endif
23929+
23930+RET_ENDP
23931+BEGIN(dec_return_unchecked)
23932+ movl (v), %eax
23933+ movl 4(v), %edx
23934+ subl $1, %eax
23935+ sbbl $0, %edx
23936 movl %eax, (v)
23937 movl %edx, 4(v)
23938 RET_ENDP
23939@@ -143,6 +286,13 @@ BEGIN(add_unless)
23940 adcl %edx, %edi
23941 addl (v), %eax
23942 adcl 4(v), %edx
23943+
23944+#ifdef CONFIG_PAX_REFCOUNT
23945+ into
23946+1234:
23947+ _ASM_EXTABLE(1234b, 2f)
23948+#endif
23949+
23950 cmpl %eax, %ecx
23951 je 3f
23952 1:
23953@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
23954 1:
23955 addl $1, %eax
23956 adcl $0, %edx
23957+
23958+#ifdef CONFIG_PAX_REFCOUNT
23959+ into
23960+1234:
23961+ _ASM_EXTABLE(1234b, 2f)
23962+#endif
23963+
23964 movl %eax, (v)
23965 movl %edx, 4(v)
23966 movl $1, %eax
23967@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
23968 movl 4(v), %edx
23969 subl $1, %eax
23970 sbbl $0, %edx
23971+
23972+#ifdef CONFIG_PAX_REFCOUNT
23973+ into
23974+1234:
23975+ _ASM_EXTABLE(1234b, 1f)
23976+#endif
23977+
23978 js 1f
23979 movl %eax, (v)
23980 movl %edx, 4(v)
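Every REFCOUNT hunk in this file follows one scheme: perform the 64-bit add/sub, then test the signed-overflow flag. The in-place forms undo the operation and raise #OF directly (jno ... int $4), while the value-returning forms branch over the store through an exception-table entry (into plus _ASM_EXTABLE). Ignoring atomicity, the checked add reduces to roughly this C, with the GCC/Clang __builtin_add_overflow builtin standing in for the OF flag and refcount_trap() a hypothetical stand-in for the #OF handler:

    static void refcount_trap(void);    /* hypothetical reporting hook */

    static void atomic64_add_checked_sketch(long long i, long long *v)
    {
            long long res;

            if (__builtin_add_overflow(*v, i, &res)) {
                    refcount_trap();    /* report; *v keeps its unwrapped value */
                    return;
            }
            *v = res;
    }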
23981diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
23982index f5cc9eb..51fa319 100644
23983--- a/arch/x86/lib/atomic64_cx8_32.S
23984+++ b/arch/x86/lib/atomic64_cx8_32.S
23985@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
23986 CFI_STARTPROC
23987
23988 read64 %ecx
23989+ pax_force_retaddr
23990 ret
23991 CFI_ENDPROC
23992 ENDPROC(atomic64_read_cx8)
23993
23994+ENTRY(atomic64_read_unchecked_cx8)
23995+ CFI_STARTPROC
23996+
23997+ read64 %ecx
23998+ pax_force_retaddr
23999+ ret
24000+ CFI_ENDPROC
24001+ENDPROC(atomic64_read_unchecked_cx8)
24002+
24003 ENTRY(atomic64_set_cx8)
24004 CFI_STARTPROC
24005
24006@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
24007 cmpxchg8b (%esi)
24008 jne 1b
24009
24010+ pax_force_retaddr
24011 ret
24012 CFI_ENDPROC
24013 ENDPROC(atomic64_set_cx8)
24014
24015+ENTRY(atomic64_set_unchecked_cx8)
24016+ CFI_STARTPROC
24017+
24018+1:
24019+/* we don't need LOCK_PREFIX since aligned 64-bit writes
24020+ * are atomic on 586 and newer */
24021+ cmpxchg8b (%esi)
24022+ jne 1b
24023+
24024+ pax_force_retaddr
24025+ ret
24026+ CFI_ENDPROC
24027+ENDPROC(atomic64_set_unchecked_cx8)
24028+
24029 ENTRY(atomic64_xchg_cx8)
24030 CFI_STARTPROC
24031
24032@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
24033 cmpxchg8b (%esi)
24034 jne 1b
24035
24036+ pax_force_retaddr
24037 ret
24038 CFI_ENDPROC
24039 ENDPROC(atomic64_xchg_cx8)
24040
24041-.macro addsub_return func ins insc
24042-ENTRY(atomic64_\func\()_return_cx8)
24043+.macro addsub_return func ins insc unchecked=""
24044+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24045 CFI_STARTPROC
24046 SAVE ebp
24047 SAVE ebx
24048@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
24049 movl %edx, %ecx
24050 \ins\()l %esi, %ebx
24051 \insc\()l %edi, %ecx
24052+
24053+.ifb \unchecked
24054+#ifdef CONFIG_PAX_REFCOUNT
24055+ into
24056+2:
24057+ _ASM_EXTABLE(2b, 3f)
24058+#endif
24059+.endif
24060+
24061 LOCK_PREFIX
24062 cmpxchg8b (%ebp)
24063 jne 1b
24064-
24065-10:
24066 movl %ebx, %eax
24067 movl %ecx, %edx
24068+
24069+.ifb \unchecked
24070+#ifdef CONFIG_PAX_REFCOUNT
24071+3:
24072+#endif
24073+.endif
24074+
24075 RESTORE edi
24076 RESTORE esi
24077 RESTORE ebx
24078 RESTORE ebp
24079+ pax_force_retaddr
24080 ret
24081 CFI_ENDPROC
24082-ENDPROC(atomic64_\func\()_return_cx8)
24083+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24084 .endm
24085
24086 addsub_return add add adc
24087 addsub_return sub sub sbb
24088+addsub_return add add adc _unchecked
24089+addsub_return sub sub sbb _unchecked
24090
24091-.macro incdec_return func ins insc
24092-ENTRY(atomic64_\func\()_return_cx8)
24093+.macro incdec_return func ins insc unchecked=""
24094+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24095 CFI_STARTPROC
24096 SAVE ebx
24097
24098@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
24099 movl %edx, %ecx
24100 \ins\()l $1, %ebx
24101 \insc\()l $0, %ecx
24102+
24103+.ifb \unchecked
24104+#ifdef CONFIG_PAX_REFCOUNT
24105+ into
24106+2:
24107+ _ASM_EXTABLE(2b, 3f)
24108+#endif
24109+.endif
24110+
24111 LOCK_PREFIX
24112 cmpxchg8b (%esi)
24113 jne 1b
24114
24115-10:
24116 movl %ebx, %eax
24117 movl %ecx, %edx
24118+
24119+.ifb \unchecked
24120+#ifdef CONFIG_PAX_REFCOUNT
24121+3:
24122+#endif
24123+.endif
24124+
24125 RESTORE ebx
24126+ pax_force_retaddr
24127 ret
24128 CFI_ENDPROC
24129-ENDPROC(atomic64_\func\()_return_cx8)
24130+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24131 .endm
24132
24133 incdec_return inc add adc
24134 incdec_return dec sub sbb
24135+incdec_return inc add adc _unchecked
24136+incdec_return dec sub sbb _unchecked
24137
24138 ENTRY(atomic64_dec_if_positive_cx8)
24139 CFI_STARTPROC
24140@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
24141 movl %edx, %ecx
24142 subl $1, %ebx
24143 sbb $0, %ecx
24144+
24145+#ifdef CONFIG_PAX_REFCOUNT
24146+ into
24147+1234:
24148+ _ASM_EXTABLE(1234b, 2f)
24149+#endif
24150+
24151 js 2f
24152 LOCK_PREFIX
24153 cmpxchg8b (%esi)
24154@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
24155 movl %ebx, %eax
24156 movl %ecx, %edx
24157 RESTORE ebx
24158+ pax_force_retaddr
24159 ret
24160 CFI_ENDPROC
24161 ENDPROC(atomic64_dec_if_positive_cx8)
24162@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
24163 movl %edx, %ecx
24164 addl %ebp, %ebx
24165 adcl %edi, %ecx
24166+
24167+#ifdef CONFIG_PAX_REFCOUNT
24168+ into
24169+1234:
24170+ _ASM_EXTABLE(1234b, 3f)
24171+#endif
24172+
24173 LOCK_PREFIX
24174 cmpxchg8b (%esi)
24175 jne 1b
24176@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
24177 CFI_ADJUST_CFA_OFFSET -8
24178 RESTORE ebx
24179 RESTORE ebp
24180+ pax_force_retaddr
24181 ret
24182 4:
24183 cmpl %edx, 4(%esp)
24184@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
24185 xorl %ecx, %ecx
24186 addl $1, %ebx
24187 adcl %edx, %ecx
24188+
24189+#ifdef CONFIG_PAX_REFCOUNT
24190+ into
24191+1234:
24192+ _ASM_EXTABLE(1234b, 3f)
24193+#endif
24194+
24195 LOCK_PREFIX
24196 cmpxchg8b (%esi)
24197 jne 1b
24198@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
24199 movl $1, %eax
24200 3:
24201 RESTORE ebx
24202+ pax_force_retaddr
24203 ret
24204 CFI_ENDPROC
24205 ENDPROC(atomic64_inc_not_zero_cx8)
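The cx8 variant generates both entry points from one macro body: addsub_return and incdec_return gain an optional "unchecked" suffix parameter, .ifb \unchecked compiles the overflow check only into the suffix-less (checked) instantiation, and each macro is then expanded twice. A C analogue of the same template trick, atomicity elided and helpers hypothetical:

    static void refcount_trap(void);    /* hypothetical #OF stand-in */

    #define DEFINE_ADD_RETURN(suffix, checked)                                \
    static long long atomic64_add_return##suffix(long long i, long long *v)  \
    {                                                                         \
            long long res;                                                    \
                                                                              \
            if (__builtin_add_overflow(*v, i, &res) && (checked)) {          \
                    refcount_trap();                                          \
                    return *v;          /* value left unwrapped */            \
            }                                                                 \
            *v = res;                   /* _unchecked stores even on wrap */  \
            return res;                                                       \
    }

    DEFINE_ADD_RETURN(, 1)              /* atomic64_add_return */
    DEFINE_ADD_RETURN(_unchecked, 0)    /* atomic64_add_return_unchecked */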
24206diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
24207index 2af5df3..62b1a5a 100644
24208--- a/arch/x86/lib/checksum_32.S
24209+++ b/arch/x86/lib/checksum_32.S
24210@@ -29,7 +29,8 @@
24211 #include <asm/dwarf2.h>
24212 #include <asm/errno.h>
24213 #include <asm/asm.h>
24214-
24215+#include <asm/segment.h>
24216+
24217 /*
24218 * computes a partial checksum, e.g. for TCP/UDP fragments
24219 */
24220@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
24221
24222 #define ARGBASE 16
24223 #define FP 12
24224-
24225-ENTRY(csum_partial_copy_generic)
24226+
24227+ENTRY(csum_partial_copy_generic_to_user)
24228 CFI_STARTPROC
24229+
24230+#ifdef CONFIG_PAX_MEMORY_UDEREF
24231+ pushl_cfi %gs
24232+ popl_cfi %es
24233+ jmp csum_partial_copy_generic
24234+#endif
24235+
24236+ENTRY(csum_partial_copy_generic_from_user)
24237+
24238+#ifdef CONFIG_PAX_MEMORY_UDEREF
24239+ pushl_cfi %gs
24240+ popl_cfi %ds
24241+#endif
24242+
24243+ENTRY(csum_partial_copy_generic)
24244 subl $4,%esp
24245 CFI_ADJUST_CFA_OFFSET 4
24246 pushl_cfi %edi
24247@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
24248 jmp 4f
24249 SRC(1: movw (%esi), %bx )
24250 addl $2, %esi
24251-DST( movw %bx, (%edi) )
24252+DST( movw %bx, %es:(%edi) )
24253 addl $2, %edi
24254 addw %bx, %ax
24255 adcl $0, %eax
24256@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
24257 SRC(1: movl (%esi), %ebx )
24258 SRC( movl 4(%esi), %edx )
24259 adcl %ebx, %eax
24260-DST( movl %ebx, (%edi) )
24261+DST( movl %ebx, %es:(%edi) )
24262 adcl %edx, %eax
24263-DST( movl %edx, 4(%edi) )
24264+DST( movl %edx, %es:4(%edi) )
24265
24266 SRC( movl 8(%esi), %ebx )
24267 SRC( movl 12(%esi), %edx )
24268 adcl %ebx, %eax
24269-DST( movl %ebx, 8(%edi) )
24270+DST( movl %ebx, %es:8(%edi) )
24271 adcl %edx, %eax
24272-DST( movl %edx, 12(%edi) )
24273+DST( movl %edx, %es:12(%edi) )
24274
24275 SRC( movl 16(%esi), %ebx )
24276 SRC( movl 20(%esi), %edx )
24277 adcl %ebx, %eax
24278-DST( movl %ebx, 16(%edi) )
24279+DST( movl %ebx, %es:16(%edi) )
24280 adcl %edx, %eax
24281-DST( movl %edx, 20(%edi) )
24282+DST( movl %edx, %es:20(%edi) )
24283
24284 SRC( movl 24(%esi), %ebx )
24285 SRC( movl 28(%esi), %edx )
24286 adcl %ebx, %eax
24287-DST( movl %ebx, 24(%edi) )
24288+DST( movl %ebx, %es:24(%edi) )
24289 adcl %edx, %eax
24290-DST( movl %edx, 28(%edi) )
24291+DST( movl %edx, %es:28(%edi) )
24292
24293 lea 32(%esi), %esi
24294 lea 32(%edi), %edi
24295@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
24296 shrl $2, %edx # This clears CF
24297 SRC(3: movl (%esi), %ebx )
24298 adcl %ebx, %eax
24299-DST( movl %ebx, (%edi) )
24300+DST( movl %ebx, %es:(%edi) )
24301 lea 4(%esi), %esi
24302 lea 4(%edi), %edi
24303 dec %edx
24304@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
24305 jb 5f
24306 SRC( movw (%esi), %cx )
24307 leal 2(%esi), %esi
24308-DST( movw %cx, (%edi) )
24309+DST( movw %cx, %es:(%edi) )
24310 leal 2(%edi), %edi
24311 je 6f
24312 shll $16,%ecx
24313 SRC(5: movb (%esi), %cl )
24314-DST( movb %cl, (%edi) )
24315+DST( movb %cl, %es:(%edi) )
24316 6: addl %ecx, %eax
24317 adcl $0, %eax
24318 7:
24319@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
24320
24321 6001:
24322 movl ARGBASE+20(%esp), %ebx # src_err_ptr
24323- movl $-EFAULT, (%ebx)
24324+ movl $-EFAULT, %ss:(%ebx)
24325
24326 # zero the complete destination - computing the rest
24327 # is too much work
24328@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
24329
24330 6002:
24331 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
24332- movl $-EFAULT,(%ebx)
24333+ movl $-EFAULT,%ss:(%ebx)
24334 jmp 5000b
24335
24336 .previous
24337
24338+ pushl_cfi %ss
24339+ popl_cfi %ds
24340+ pushl_cfi %ss
24341+ popl_cfi %es
24342 popl_cfi %ebx
24343 CFI_RESTORE ebx
24344 popl_cfi %esi
24345@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
24346 popl_cfi %ecx # equivalent to addl $4,%esp
24347 ret
24348 CFI_ENDPROC
24349-ENDPROC(csum_partial_copy_generic)
24350+ENDPROC(csum_partial_copy_generic_to_user)
24351
24352 #else
24353
24354 /* Version for PentiumII/PPro */
24355
24356 #define ROUND1(x) \
24357+ nop; nop; nop; \
24358 SRC(movl x(%esi), %ebx ) ; \
24359 addl %ebx, %eax ; \
24360- DST(movl %ebx, x(%edi) ) ;
24361+ DST(movl %ebx, %es:x(%edi)) ;
24362
24363 #define ROUND(x) \
24364+ nop; nop; nop; \
24365 SRC(movl x(%esi), %ebx ) ; \
24366 adcl %ebx, %eax ; \
24367- DST(movl %ebx, x(%edi) ) ;
24368+ DST(movl %ebx, %es:x(%edi)) ;
24369
24370 #define ARGBASE 12
24371-
24372-ENTRY(csum_partial_copy_generic)
24373+
24374+ENTRY(csum_partial_copy_generic_to_user)
24375 CFI_STARTPROC
24376+
24377+#ifdef CONFIG_PAX_MEMORY_UDEREF
24378+ pushl_cfi %gs
24379+ popl_cfi %es
24380+ jmp csum_partial_copy_generic
24381+#endif
24382+
24383+ENTRY(csum_partial_copy_generic_from_user)
24384+
24385+#ifdef CONFIG_PAX_MEMORY_UDEREF
24386+ pushl_cfi %gs
24387+ popl_cfi %ds
24388+#endif
24389+
24390+ENTRY(csum_partial_copy_generic)
24391 pushl_cfi %ebx
24392 CFI_REL_OFFSET ebx, 0
24393 pushl_cfi %edi
24394@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
24395 subl %ebx, %edi
24396 lea -1(%esi),%edx
24397 andl $-32,%edx
24398- lea 3f(%ebx,%ebx), %ebx
24399+ lea 3f(%ebx,%ebx,2), %ebx
24400 testl %esi, %esi
24401 jmp *%ebx
24402 1: addl $64,%esi
24403@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
24404 jb 5f
24405 SRC( movw (%esi), %dx )
24406 leal 2(%esi), %esi
24407-DST( movw %dx, (%edi) )
24408+DST( movw %dx, %es:(%edi) )
24409 leal 2(%edi), %edi
24410 je 6f
24411 shll $16,%edx
24412 5:
24413 SRC( movb (%esi), %dl )
24414-DST( movb %dl, (%edi) )
24415+DST( movb %dl, %es:(%edi) )
24416 6: addl %edx, %eax
24417 adcl $0, %eax
24418 7:
24419 .section .fixup, "ax"
24420 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
24421- movl $-EFAULT, (%ebx)
24422+ movl $-EFAULT, %ss:(%ebx)
24423 # zero the complete destination (computing the rest is too much work)
24424 movl ARGBASE+8(%esp),%edi # dst
24425 movl ARGBASE+12(%esp),%ecx # len
24426@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
24427 rep; stosb
24428 jmp 7b
24429 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
24430- movl $-EFAULT, (%ebx)
24431+ movl $-EFAULT, %ss:(%ebx)
24432 jmp 7b
24433 .previous
24434
24435+#ifdef CONFIG_PAX_MEMORY_UDEREF
24436+ pushl_cfi %ss
24437+ popl_cfi %ds
24438+ pushl_cfi %ss
24439+ popl_cfi %es
24440+#endif
24441+
24442 popl_cfi %esi
24443 CFI_RESTORE esi
24444 popl_cfi %edi
24445@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
24446 CFI_RESTORE ebx
24447 ret
24448 CFI_ENDPROC
24449-ENDPROC(csum_partial_copy_generic)
24450+ENDPROC(csum_partial_copy_generic_to_user)
24451
24452 #undef ROUND
24453 #undef ROUND1
24454diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
24455index f2145cf..cea889d 100644
24456--- a/arch/x86/lib/clear_page_64.S
24457+++ b/arch/x86/lib/clear_page_64.S
24458@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
24459 movl $4096/8,%ecx
24460 xorl %eax,%eax
24461 rep stosq
24462+ pax_force_retaddr
24463 ret
24464 CFI_ENDPROC
24465 ENDPROC(clear_page_c)
24466@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
24467 movl $4096,%ecx
24468 xorl %eax,%eax
24469 rep stosb
24470+ pax_force_retaddr
24471 ret
24472 CFI_ENDPROC
24473 ENDPROC(clear_page_c_e)
24474@@ -43,6 +45,7 @@ ENTRY(clear_page)
24475 leaq 64(%rdi),%rdi
24476 jnz .Lloop
24477 nop
24478+ pax_force_retaddr
24479 ret
24480 CFI_ENDPROC
24481 .Lclear_page_end:
24482@@ -58,7 +61,7 @@ ENDPROC(clear_page)
24483
24484 #include <asm/cpufeature.h>
24485
24486- .section .altinstr_replacement,"ax"
24487+ .section .altinstr_replacement,"a"
24488 1: .byte 0xeb /* jmp <disp8> */
24489 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
24490 2: .byte 0xeb /* jmp <disp8> */
24491diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
24492index 1e572c5..2a162cd 100644
24493--- a/arch/x86/lib/cmpxchg16b_emu.S
24494+++ b/arch/x86/lib/cmpxchg16b_emu.S
24495@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
24496
24497 popf
24498 mov $1, %al
24499+ pax_force_retaddr
24500 ret
24501
24502 not_same:
24503 popf
24504 xor %al,%al
24505+ pax_force_retaddr
24506 ret
24507
24508 CFI_ENDPROC
24509diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
24510index 176cca6..1166c50 100644
24511--- a/arch/x86/lib/copy_page_64.S
24512+++ b/arch/x86/lib/copy_page_64.S
24513@@ -9,6 +9,7 @@ copy_page_rep:
24514 CFI_STARTPROC
24515 movl $4096/8, %ecx
24516 rep movsq
24517+ pax_force_retaddr
24518 ret
24519 CFI_ENDPROC
24520 ENDPROC(copy_page_rep)
24521@@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
24522
24523 ENTRY(copy_page)
24524 CFI_STARTPROC
24525- subq $2*8, %rsp
24526- CFI_ADJUST_CFA_OFFSET 2*8
24527+ subq $3*8, %rsp
24528+ CFI_ADJUST_CFA_OFFSET 3*8
24529 movq %rbx, (%rsp)
24530 CFI_REL_OFFSET rbx, 0
24531 movq %r12, 1*8(%rsp)
24532 CFI_REL_OFFSET r12, 1*8
24533+ movq %r13, 2*8(%rsp)
24534+ CFI_REL_OFFSET r13, 2*8
24535
24536 movl $(4096/64)-5, %ecx
24537 .p2align 4
24538@@ -36,7 +39,7 @@ ENTRY(copy_page)
24539 movq 0x8*2(%rsi), %rdx
24540 movq 0x8*3(%rsi), %r8
24541 movq 0x8*4(%rsi), %r9
24542- movq 0x8*5(%rsi), %r10
24543+ movq 0x8*5(%rsi), %r13
24544 movq 0x8*6(%rsi), %r11
24545 movq 0x8*7(%rsi), %r12
24546
24547@@ -47,7 +50,7 @@ ENTRY(copy_page)
24548 movq %rdx, 0x8*2(%rdi)
24549 movq %r8, 0x8*3(%rdi)
24550 movq %r9, 0x8*4(%rdi)
24551- movq %r10, 0x8*5(%rdi)
24552+ movq %r13, 0x8*5(%rdi)
24553 movq %r11, 0x8*6(%rdi)
24554 movq %r12, 0x8*7(%rdi)
24555
24556@@ -66,7 +69,7 @@ ENTRY(copy_page)
24557 movq 0x8*2(%rsi), %rdx
24558 movq 0x8*3(%rsi), %r8
24559 movq 0x8*4(%rsi), %r9
24560- movq 0x8*5(%rsi), %r10
24561+ movq 0x8*5(%rsi), %r13
24562 movq 0x8*6(%rsi), %r11
24563 movq 0x8*7(%rsi), %r12
24564
24565@@ -75,7 +78,7 @@ ENTRY(copy_page)
24566 movq %rdx, 0x8*2(%rdi)
24567 movq %r8, 0x8*3(%rdi)
24568 movq %r9, 0x8*4(%rdi)
24569- movq %r10, 0x8*5(%rdi)
24570+ movq %r13, 0x8*5(%rdi)
24571 movq %r11, 0x8*6(%rdi)
24572 movq %r12, 0x8*7(%rdi)
24573
24574@@ -87,8 +90,11 @@ ENTRY(copy_page)
24575 CFI_RESTORE rbx
24576 movq 1*8(%rsp), %r12
24577 CFI_RESTORE r12
24578- addq $2*8, %rsp
24579- CFI_ADJUST_CFA_OFFSET -2*8
24580+ movq 2*8(%rsp), %r13
24581+ CFI_RESTORE r13
24582+ addq $3*8, %rsp
24583+ CFI_ADJUST_CFA_OFFSET -3*8
24584+ pax_force_retaddr
24585 ret
24586 .Lcopy_page_end:
24587 CFI_ENDPROC
24588@@ -99,7 +105,7 @@ ENDPROC(copy_page)
24589
24590 #include <asm/cpufeature.h>
24591
24592- .section .altinstr_replacement,"ax"
24593+ .section .altinstr_replacement,"a"
24594 1: .byte 0xeb /* jmp <disp8> */
24595 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
24596 2:
24597diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
24598index a30ca15..d25fab6 100644
24599--- a/arch/x86/lib/copy_user_64.S
24600+++ b/arch/x86/lib/copy_user_64.S
24601@@ -18,6 +18,7 @@
24602 #include <asm/alternative-asm.h>
24603 #include <asm/asm.h>
24604 #include <asm/smap.h>
24605+#include <asm/pgtable.h>
24606
24607 /*
24608 * By placing feature2 after feature1 in altinstructions section, we logically
24609@@ -31,7 +32,7 @@
24610 .byte 0xe9 /* 32bit jump */
24611 .long \orig-1f /* by default jump to orig */
24612 1:
24613- .section .altinstr_replacement,"ax"
24614+ .section .altinstr_replacement,"a"
24615 2: .byte 0xe9 /* near jump with 32bit immediate */
24616 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
24617 3: .byte 0xe9 /* near jump with 32bit immediate */
24618@@ -70,47 +71,20 @@
24619 #endif
24620 .endm
24621
24622-/* Standard copy_to_user with segment limit checking */
24623-ENTRY(_copy_to_user)
24624- CFI_STARTPROC
24625- GET_THREAD_INFO(%rax)
24626- movq %rdi,%rcx
24627- addq %rdx,%rcx
24628- jc bad_to_user
24629- cmpq TI_addr_limit(%rax),%rcx
24630- ja bad_to_user
24631- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
24632- copy_user_generic_unrolled,copy_user_generic_string, \
24633- copy_user_enhanced_fast_string
24634- CFI_ENDPROC
24635-ENDPROC(_copy_to_user)
24636-
24637-/* Standard copy_from_user with segment limit checking */
24638-ENTRY(_copy_from_user)
24639- CFI_STARTPROC
24640- GET_THREAD_INFO(%rax)
24641- movq %rsi,%rcx
24642- addq %rdx,%rcx
24643- jc bad_from_user
24644- cmpq TI_addr_limit(%rax),%rcx
24645- ja bad_from_user
24646- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
24647- copy_user_generic_unrolled,copy_user_generic_string, \
24648- copy_user_enhanced_fast_string
24649- CFI_ENDPROC
24650-ENDPROC(_copy_from_user)
24651-
24652 .section .fixup,"ax"
24653 /* must zero dest */
24654 ENTRY(bad_from_user)
24655 bad_from_user:
24656 CFI_STARTPROC
24657+ testl %edx,%edx
24658+ js bad_to_user
24659 movl %edx,%ecx
24660 xorl %eax,%eax
24661 rep
24662 stosb
24663 bad_to_user:
24664 movl %edx,%eax
24665+ pax_force_retaddr
24666 ret
24667 CFI_ENDPROC
24668 ENDPROC(bad_from_user)
24669@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
24670 jz 17f
24671 1: movq (%rsi),%r8
24672 2: movq 1*8(%rsi),%r9
24673-3: movq 2*8(%rsi),%r10
24674+3: movq 2*8(%rsi),%rax
24675 4: movq 3*8(%rsi),%r11
24676 5: movq %r8,(%rdi)
24677 6: movq %r9,1*8(%rdi)
24678-7: movq %r10,2*8(%rdi)
24679+7: movq %rax,2*8(%rdi)
24680 8: movq %r11,3*8(%rdi)
24681 9: movq 4*8(%rsi),%r8
24682 10: movq 5*8(%rsi),%r9
24683-11: movq 6*8(%rsi),%r10
24684+11: movq 6*8(%rsi),%rax
24685 12: movq 7*8(%rsi),%r11
24686 13: movq %r8,4*8(%rdi)
24687 14: movq %r9,5*8(%rdi)
24688-15: movq %r10,6*8(%rdi)
24689+15: movq %rax,6*8(%rdi)
24690 16: movq %r11,7*8(%rdi)
24691 leaq 64(%rsi),%rsi
24692 leaq 64(%rdi),%rdi
24693@@ -180,6 +154,7 @@ ENTRY(copy_user_generic_unrolled)
24694 jnz 21b
24695 23: xor %eax,%eax
24696 ASM_CLAC
24697+ pax_force_retaddr
24698 ret
24699
24700 .section .fixup,"ax"
24701@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
24702 movsb
24703 4: xorl %eax,%eax
24704 ASM_CLAC
24705+ pax_force_retaddr
24706 ret
24707
24708 .section .fixup,"ax"
24709@@ -286,6 +262,7 @@ ENTRY(copy_user_enhanced_fast_string)
24710 movsb
24711 2: xorl %eax,%eax
24712 ASM_CLAC
24713+ pax_force_retaddr
24714 ret
24715
24716 .section .fixup,"ax"
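With the old _copy_to_user/_copy_from_user entry points deleted here (matching the EXPORT_SYMBOL removals in x8664_ksyms_64.c at the top of this range, the range checks presumably moving into C-side wrappers), the shared fixup grows a guard: bad_from_user only zero-fills the destination when the remaining count is non-negative as a signed value, so a corrupted count cannot turn the fixup's rep stosb into a wild multi-gigabyte clear. The C shape of the guard:

    /* sketch of the new bad_from_user logic */
    static unsigned long bad_from_user_sketch(void *dst, long remaining)
    {
            if (remaining >= 0)         /* skip the clear for bogus counts */
                    memset(dst, 0, remaining);
            return remaining;           /* bytes not copied */
    }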
24717diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
24718index 6a4f43c..f5f9e26 100644
24719--- a/arch/x86/lib/copy_user_nocache_64.S
24720+++ b/arch/x86/lib/copy_user_nocache_64.S
24721@@ -8,6 +8,7 @@
24722
24723 #include <linux/linkage.h>
24724 #include <asm/dwarf2.h>
24725+#include <asm/alternative-asm.h>
24726
24727 #define FIX_ALIGNMENT 1
24728
24729@@ -16,6 +17,7 @@
24730 #include <asm/thread_info.h>
24731 #include <asm/asm.h>
24732 #include <asm/smap.h>
24733+#include <asm/pgtable.h>
24734
24735 .macro ALIGN_DESTINATION
24736 #ifdef FIX_ALIGNMENT
24737@@ -49,6 +51,15 @@
24738 */
24739 ENTRY(__copy_user_nocache)
24740 CFI_STARTPROC
24741+
24742+#ifdef CONFIG_PAX_MEMORY_UDEREF
24743+ mov $PAX_USER_SHADOW_BASE,%rcx
24744+ cmp %rcx,%rsi
24745+ jae 1f
24746+ add %rcx,%rsi
24747+1:
24748+#endif
24749+
24750 ASM_STAC
24751 cmpl $8,%edx
24752 jb 20f /* less then 8 bytes, go to byte copy loop */
24753@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
24754 jz 17f
24755 1: movq (%rsi),%r8
24756 2: movq 1*8(%rsi),%r9
24757-3: movq 2*8(%rsi),%r10
24758+3: movq 2*8(%rsi),%rax
24759 4: movq 3*8(%rsi),%r11
24760 5: movnti %r8,(%rdi)
24761 6: movnti %r9,1*8(%rdi)
24762-7: movnti %r10,2*8(%rdi)
24763+7: movnti %rax,2*8(%rdi)
24764 8: movnti %r11,3*8(%rdi)
24765 9: movq 4*8(%rsi),%r8
24766 10: movq 5*8(%rsi),%r9
24767-11: movq 6*8(%rsi),%r10
24768+11: movq 6*8(%rsi),%rax
24769 12: movq 7*8(%rsi),%r11
24770 13: movnti %r8,4*8(%rdi)
24771 14: movnti %r9,5*8(%rdi)
24772-15: movnti %r10,6*8(%rdi)
24773+15: movnti %rax,6*8(%rdi)
24774 16: movnti %r11,7*8(%rdi)
24775 leaq 64(%rsi),%rsi
24776 leaq 64(%rdi),%rdi
24777@@ -99,6 +110,7 @@ ENTRY(__copy_user_nocache)
24778 23: xorl %eax,%eax
24779 ASM_CLAC
24780 sfence
24781+ pax_force_retaddr
24782 ret
24783
24784 .section .fixup,"ax"
24785diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
24786index 2419d5f..953ee51 100644
24787--- a/arch/x86/lib/csum-copy_64.S
24788+++ b/arch/x86/lib/csum-copy_64.S
24789@@ -9,6 +9,7 @@
24790 #include <asm/dwarf2.h>
24791 #include <asm/errno.h>
24792 #include <asm/asm.h>
24793+#include <asm/alternative-asm.h>
24794
24795 /*
24796 * Checksum copy with exception handling.
24797@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
24798 CFI_RESTORE rbp
24799 addq $7*8, %rsp
24800 CFI_ADJUST_CFA_OFFSET -7*8
24801+ pax_force_retaddr 0, 1
24802 ret
24803 CFI_RESTORE_STATE
24804
24805diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
24806index 25b7ae8..169fafc 100644
24807--- a/arch/x86/lib/csum-wrappers_64.c
24808+++ b/arch/x86/lib/csum-wrappers_64.c
24809@@ -52,7 +52,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
24810 len -= 2;
24811 }
24812 }
24813- isum = csum_partial_copy_generic((__force const void *)src,
24814+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
24815 dst, len, isum, errp, NULL);
24816 if (unlikely(*errp))
24817 goto out_err;
24818@@ -105,7 +105,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
24819 }
24820
24821 *errp = 0;
24822- return csum_partial_copy_generic(src, (void __force *)dst,
24823+ return csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
24824 len, isum, NULL, errp);
24825 }
24826 EXPORT_SYMBOL(csum_partial_copy_to_user);
24827diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
24828index 156b9c8..b144132 100644
24829--- a/arch/x86/lib/getuser.S
24830+++ b/arch/x86/lib/getuser.S
24831@@ -34,17 +34,40 @@
24832 #include <asm/thread_info.h>
24833 #include <asm/asm.h>
24834 #include <asm/smap.h>
24835+#include <asm/segment.h>
24836+#include <asm/pgtable.h>
24837+#include <asm/alternative-asm.h>
24838+
24839+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
24840+#define __copyuser_seg gs;
24841+#else
24842+#define __copyuser_seg
24843+#endif
24844
24845 .text
24846 ENTRY(__get_user_1)
24847 CFI_STARTPROC
24848+
24849+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24850 GET_THREAD_INFO(%_ASM_DX)
24851 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
24852 jae bad_get_user
24853 ASM_STAC
24854-1: movzb (%_ASM_AX),%edx
24855+
24856+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24857+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
24858+ cmp %_ASM_DX,%_ASM_AX
24859+ jae 1234f
24860+ add %_ASM_DX,%_ASM_AX
24861+1234:
24862+#endif
24863+
24864+#endif
24865+
24866+1: __copyuser_seg movzb (%_ASM_AX),%edx
24867 xor %eax,%eax
24868 ASM_CLAC
24869+ pax_force_retaddr
24870 ret
24871 CFI_ENDPROC
24872 ENDPROC(__get_user_1)
24873@@ -52,14 +75,28 @@ ENDPROC(__get_user_1)
24874 ENTRY(__get_user_2)
24875 CFI_STARTPROC
24876 add $1,%_ASM_AX
24877+
24878+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24879 jc bad_get_user
24880 GET_THREAD_INFO(%_ASM_DX)
24881 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
24882 jae bad_get_user
24883 ASM_STAC
24884-2: movzwl -1(%_ASM_AX),%edx
24885+
24886+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24887+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
24888+ cmp %_ASM_DX,%_ASM_AX
24889+ jae 1234f
24890+ add %_ASM_DX,%_ASM_AX
24891+1234:
24892+#endif
24893+
24894+#endif
24895+
24896+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
24897 xor %eax,%eax
24898 ASM_CLAC
24899+ pax_force_retaddr
24900 ret
24901 CFI_ENDPROC
24902 ENDPROC(__get_user_2)
24903@@ -67,14 +104,28 @@ ENDPROC(__get_user_2)
24904 ENTRY(__get_user_4)
24905 CFI_STARTPROC
24906 add $3,%_ASM_AX
24907+
24908+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
24909 jc bad_get_user
24910 GET_THREAD_INFO(%_ASM_DX)
24911 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
24912 jae bad_get_user
24913 ASM_STAC
24914-3: mov -3(%_ASM_AX),%edx
24915+
24916+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24917+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
24918+ cmp %_ASM_DX,%_ASM_AX
24919+ jae 1234f
24920+ add %_ASM_DX,%_ASM_AX
24921+1234:
24922+#endif
24923+
24924+#endif
24925+
24926+3: __copyuser_seg mov -3(%_ASM_AX),%edx
24927 xor %eax,%eax
24928 ASM_CLAC
24929+ pax_force_retaddr
24930 ret
24931 CFI_ENDPROC
24932 ENDPROC(__get_user_4)
24933@@ -87,10 +138,20 @@ ENTRY(__get_user_8)
24934 GET_THREAD_INFO(%_ASM_DX)
24935 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
24936 jae bad_get_user
24937+
24938+#ifdef CONFIG_PAX_MEMORY_UDEREF
24939+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
24940+ cmp %_ASM_DX,%_ASM_AX
24941+ jae 1234f
24942+ add %_ASM_DX,%_ASM_AX
24943+1234:
24944+#endif
24945+
24946 ASM_STAC
24947 4: movq -7(%_ASM_AX),%_ASM_DX
24948 xor %eax,%eax
24949 ASM_CLAC
24950+ pax_force_retaddr
24951 ret
24952 CFI_ENDPROC
24953 ENDPROC(__get_user_8)
24954@@ -101,6 +162,7 @@ bad_get_user:
24955 xor %edx,%edx
24956 mov $(-EFAULT),%_ASM_AX
24957 ASM_CLAC
24958+ pax_force_retaddr
24959 ret
24960 CFI_ENDPROC
24961 END(bad_get_user)
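__get_user_N shows both UDEREF strategies side by side: on amd64 the pointer is rebased into the shadow area when it lies below PAX_USER_SHADOW_BASE (the jae/add sequence), while on i386 the access itself simply carries a %gs user-segment override via __copyuser_seg, letting the segment limit reject kernel addresses in hardware with no extra compare. The C-side spelling of that override, in a simplified accessor with the exception-table fixup omitted:

    #if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
    #define __copyuser_seg "gs;"        /* user accesses go through %gs */
    #else
    #define __copyuser_seg
    #endif

    static int get_user_byte_sketch(const unsigned char __user *uaddr)
    {
            int val;

            asm volatile("1:" __copyuser_seg "movzbl %1,%0\n"
                         : "=r" (val)
                         : "m" (*(const unsigned char __force *)uaddr));
            return val;
    }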
24962diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
24963index 54fcffe..7be149e 100644
24964--- a/arch/x86/lib/insn.c
24965+++ b/arch/x86/lib/insn.c
24966@@ -20,8 +20,10 @@
24967
24968 #ifdef __KERNEL__
24969 #include <linux/string.h>
24970+#include <asm/pgtable_types.h>
24971 #else
24972 #include <string.h>
24973+#define ktla_ktva(addr) addr
24974 #endif
24975 #include <asm/inat.h>
24976 #include <asm/insn.h>
24977@@ -53,8 +55,8 @@
24978 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
24979 {
24980 memset(insn, 0, sizeof(*insn));
24981- insn->kaddr = kaddr;
24982- insn->next_byte = kaddr;
24983+ insn->kaddr = ktla_ktva(kaddr);
24984+ insn->next_byte = ktla_ktva(kaddr);
24985 insn->x86_64 = x86_64 ? 1 : 0;
24986 insn->opnd_bytes = 4;
24987 if (x86_64)
24988diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
24989index 05a95e7..326f2fa 100644
24990--- a/arch/x86/lib/iomap_copy_64.S
24991+++ b/arch/x86/lib/iomap_copy_64.S
24992@@ -17,6 +17,7 @@
24993
24994 #include <linux/linkage.h>
24995 #include <asm/dwarf2.h>
24996+#include <asm/alternative-asm.h>
24997
24998 /*
24999 * override generic version in lib/iomap_copy.c
25000@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
25001 CFI_STARTPROC
25002 movl %edx,%ecx
25003 rep movsd
25004+ pax_force_retaddr
25005 ret
25006 CFI_ENDPROC
25007 ENDPROC(__iowrite32_copy)
25008diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
25009index 1c273be..da9cc0e 100644
25010--- a/arch/x86/lib/memcpy_64.S
25011+++ b/arch/x86/lib/memcpy_64.S
25012@@ -33,6 +33,7 @@
25013 rep movsq
25014 movl %edx, %ecx
25015 rep movsb
25016+ pax_force_retaddr
25017 ret
25018 .Lmemcpy_e:
25019 .previous
25020@@ -49,6 +50,7 @@
25021 movq %rdi, %rax
25022 movq %rdx, %rcx
25023 rep movsb
25024+ pax_force_retaddr
25025 ret
25026 .Lmemcpy_e_e:
25027 .previous
25028@@ -76,13 +78,13 @@ ENTRY(memcpy)
25029 */
25030 movq 0*8(%rsi), %r8
25031 movq 1*8(%rsi), %r9
25032- movq 2*8(%rsi), %r10
25033+ movq 2*8(%rsi), %rcx
25034 movq 3*8(%rsi), %r11
25035 leaq 4*8(%rsi), %rsi
25036
25037 movq %r8, 0*8(%rdi)
25038 movq %r9, 1*8(%rdi)
25039- movq %r10, 2*8(%rdi)
25040+ movq %rcx, 2*8(%rdi)
25041 movq %r11, 3*8(%rdi)
25042 leaq 4*8(%rdi), %rdi
25043 jae .Lcopy_forward_loop
25044@@ -105,12 +107,12 @@ ENTRY(memcpy)
25045 subq $0x20, %rdx
25046 movq -1*8(%rsi), %r8
25047 movq -2*8(%rsi), %r9
25048- movq -3*8(%rsi), %r10
25049+ movq -3*8(%rsi), %rcx
25050 movq -4*8(%rsi), %r11
25051 leaq -4*8(%rsi), %rsi
25052 movq %r8, -1*8(%rdi)
25053 movq %r9, -2*8(%rdi)
25054- movq %r10, -3*8(%rdi)
25055+ movq %rcx, -3*8(%rdi)
25056 movq %r11, -4*8(%rdi)
25057 leaq -4*8(%rdi), %rdi
25058 jae .Lcopy_backward_loop
25059@@ -130,12 +132,13 @@ ENTRY(memcpy)
25060 */
25061 movq 0*8(%rsi), %r8
25062 movq 1*8(%rsi), %r9
25063- movq -2*8(%rsi, %rdx), %r10
25064+ movq -2*8(%rsi, %rdx), %rcx
25065 movq -1*8(%rsi, %rdx), %r11
25066 movq %r8, 0*8(%rdi)
25067 movq %r9, 1*8(%rdi)
25068- movq %r10, -2*8(%rdi, %rdx)
25069+ movq %rcx, -2*8(%rdi, %rdx)
25070 movq %r11, -1*8(%rdi, %rdx)
25071+ pax_force_retaddr
25072 retq
25073 .p2align 4
25074 .Lless_16bytes:
25075@@ -148,6 +151,7 @@ ENTRY(memcpy)
25076 movq -1*8(%rsi, %rdx), %r9
25077 movq %r8, 0*8(%rdi)
25078 movq %r9, -1*8(%rdi, %rdx)
25079+ pax_force_retaddr
25080 retq
25081 .p2align 4
25082 .Lless_8bytes:
25083@@ -161,6 +165,7 @@ ENTRY(memcpy)
25084 movl -4(%rsi, %rdx), %r8d
25085 movl %ecx, (%rdi)
25086 movl %r8d, -4(%rdi, %rdx)
25087+ pax_force_retaddr
25088 retq
25089 .p2align 4
25090 .Lless_3bytes:
25091@@ -179,6 +184,7 @@ ENTRY(memcpy)
25092 movb %cl, (%rdi)
25093
25094 .Lend:
25095+ pax_force_retaddr
25096 retq
25097 CFI_ENDPROC
25098 ENDPROC(memcpy)
25099diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
25100index ee16461..c39c199 100644
25101--- a/arch/x86/lib/memmove_64.S
25102+++ b/arch/x86/lib/memmove_64.S
25103@@ -61,13 +61,13 @@ ENTRY(memmove)
25104 5:
25105 sub $0x20, %rdx
25106 movq 0*8(%rsi), %r11
25107- movq 1*8(%rsi), %r10
25108+ movq 1*8(%rsi), %rcx
25109 movq 2*8(%rsi), %r9
25110 movq 3*8(%rsi), %r8
25111 leaq 4*8(%rsi), %rsi
25112
25113 movq %r11, 0*8(%rdi)
25114- movq %r10, 1*8(%rdi)
25115+ movq %rcx, 1*8(%rdi)
25116 movq %r9, 2*8(%rdi)
25117 movq %r8, 3*8(%rdi)
25118 leaq 4*8(%rdi), %rdi
25119@@ -81,10 +81,10 @@ ENTRY(memmove)
25120 4:
25121 movq %rdx, %rcx
25122 movq -8(%rsi, %rdx), %r11
25123- lea -8(%rdi, %rdx), %r10
25124+ lea -8(%rdi, %rdx), %r9
25125 shrq $3, %rcx
25126 rep movsq
25127- movq %r11, (%r10)
25128+ movq %r11, (%r9)
25129 jmp 13f
25130 .Lmemmove_end_forward:
25131
25132@@ -95,14 +95,14 @@ ENTRY(memmove)
25133 7:
25134 movq %rdx, %rcx
25135 movq (%rsi), %r11
25136- movq %rdi, %r10
25137+ movq %rdi, %r9
25138 leaq -8(%rsi, %rdx), %rsi
25139 leaq -8(%rdi, %rdx), %rdi
25140 shrq $3, %rcx
25141 std
25142 rep movsq
25143 cld
25144- movq %r11, (%r10)
25145+ movq %r11, (%r9)
25146 jmp 13f
25147
25148 /*
25149@@ -127,13 +127,13 @@ ENTRY(memmove)
25150 8:
25151 subq $0x20, %rdx
25152 movq -1*8(%rsi), %r11
25153- movq -2*8(%rsi), %r10
25154+ movq -2*8(%rsi), %rcx
25155 movq -3*8(%rsi), %r9
25156 movq -4*8(%rsi), %r8
25157 leaq -4*8(%rsi), %rsi
25158
25159 movq %r11, -1*8(%rdi)
25160- movq %r10, -2*8(%rdi)
25161+ movq %rcx, -2*8(%rdi)
25162 movq %r9, -3*8(%rdi)
25163 movq %r8, -4*8(%rdi)
25164 leaq -4*8(%rdi), %rdi
25165@@ -151,11 +151,11 @@ ENTRY(memmove)
25166 * Move data from 16 bytes to 31 bytes.
25167 */
25168 movq 0*8(%rsi), %r11
25169- movq 1*8(%rsi), %r10
25170+ movq 1*8(%rsi), %rcx
25171 movq -2*8(%rsi, %rdx), %r9
25172 movq -1*8(%rsi, %rdx), %r8
25173 movq %r11, 0*8(%rdi)
25174- movq %r10, 1*8(%rdi)
25175+ movq %rcx, 1*8(%rdi)
25176 movq %r9, -2*8(%rdi, %rdx)
25177 movq %r8, -1*8(%rdi, %rdx)
25178 jmp 13f
25179@@ -167,9 +167,9 @@ ENTRY(memmove)
25180 * Move data from 8 bytes to 15 bytes.
25181 */
25182 movq 0*8(%rsi), %r11
25183- movq -1*8(%rsi, %rdx), %r10
25184+ movq -1*8(%rsi, %rdx), %r9
25185 movq %r11, 0*8(%rdi)
25186- movq %r10, -1*8(%rdi, %rdx)
25187+ movq %r9, -1*8(%rdi, %rdx)
25188 jmp 13f
25189 10:
25190 cmpq $4, %rdx
25191@@ -178,9 +178,9 @@ ENTRY(memmove)
25192 * Move data from 4 bytes to 7 bytes.
25193 */
25194 movl (%rsi), %r11d
25195- movl -4(%rsi, %rdx), %r10d
25196+ movl -4(%rsi, %rdx), %r9d
25197 movl %r11d, (%rdi)
25198- movl %r10d, -4(%rdi, %rdx)
25199+ movl %r9d, -4(%rdi, %rdx)
25200 jmp 13f
25201 11:
25202 cmp $2, %rdx
25203@@ -189,9 +189,9 @@ ENTRY(memmove)
25204 * Move data from 2 bytes to 3 bytes.
25205 */
25206 movw (%rsi), %r11w
25207- movw -2(%rsi, %rdx), %r10w
25208+ movw -2(%rsi, %rdx), %r9w
25209 movw %r11w, (%rdi)
25210- movw %r10w, -2(%rdi, %rdx)
25211+ movw %r9w, -2(%rdi, %rdx)
25212 jmp 13f
25213 12:
25214 cmp $1, %rdx
25215@@ -202,6 +202,7 @@ ENTRY(memmove)
25216 movb (%rsi), %r11b
25217 movb %r11b, (%rdi)
25218 13:
25219+ pax_force_retaddr
25220 retq
25221 CFI_ENDPROC
25222
25223@@ -210,6 +211,7 @@ ENTRY(memmove)
25224 /* Forward moving data. */
25225 movq %rdx, %rcx
25226 rep movsb
25227+ pax_force_retaddr
25228 retq
25229 .Lmemmove_end_forward_efs:
25230 .previous
25231diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
25232index 2dcb380..963660a 100644
25233--- a/arch/x86/lib/memset_64.S
25234+++ b/arch/x86/lib/memset_64.S
25235@@ -30,6 +30,7 @@
25236 movl %edx,%ecx
25237 rep stosb
25238 movq %r9,%rax
25239+ pax_force_retaddr
25240 ret
25241 .Lmemset_e:
25242 .previous
25243@@ -52,6 +53,7 @@
25244 movq %rdx,%rcx
25245 rep stosb
25246 movq %r9,%rax
25247+ pax_force_retaddr
25248 ret
25249 .Lmemset_e_e:
25250 .previous
25251@@ -59,7 +61,7 @@
25252 ENTRY(memset)
25253 ENTRY(__memset)
25254 CFI_STARTPROC
25255- movq %rdi,%r10
25256+ movq %rdi,%r11
25257
25258 /* expand byte value */
25259 movzbl %sil,%ecx
25260@@ -117,7 +119,8 @@ ENTRY(__memset)
25261 jnz .Lloop_1
25262
25263 .Lende:
25264- movq %r10,%rax
25265+ movq %r11,%rax
25266+ pax_force_retaddr
25267 ret
25268
25269 CFI_RESTORE_STATE
25270diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
25271index c9f2d9b..e7fd2c0 100644
25272--- a/arch/x86/lib/mmx_32.c
25273+++ b/arch/x86/lib/mmx_32.c
25274@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
25275 {
25276 void *p;
25277 int i;
25278+ unsigned long cr0;
25279
25280 if (unlikely(in_interrupt()))
25281 return __memcpy(to, from, len);
25282@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
25283 kernel_fpu_begin();
25284
25285 __asm__ __volatile__ (
25286- "1: prefetch (%0)\n" /* This set is 28 bytes */
25287- " prefetch 64(%0)\n"
25288- " prefetch 128(%0)\n"
25289- " prefetch 192(%0)\n"
25290- " prefetch 256(%0)\n"
25291+ "1: prefetch (%1)\n" /* This set is 28 bytes */
25292+ " prefetch 64(%1)\n"
25293+ " prefetch 128(%1)\n"
25294+ " prefetch 192(%1)\n"
25295+ " prefetch 256(%1)\n"
25296 "2: \n"
25297 ".section .fixup, \"ax\"\n"
25298- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25299+ "3: \n"
25300+
25301+#ifdef CONFIG_PAX_KERNEXEC
25302+ " movl %%cr0, %0\n"
25303+ " movl %0, %%eax\n"
25304+ " andl $0xFFFEFFFF, %%eax\n"
25305+ " movl %%eax, %%cr0\n"
25306+#endif
25307+
25308+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25309+
25310+#ifdef CONFIG_PAX_KERNEXEC
25311+ " movl %0, %%cr0\n"
25312+#endif
25313+
25314 " jmp 2b\n"
25315 ".previous\n"
25316 _ASM_EXTABLE(1b, 3b)
25317- : : "r" (from));
25318+ : "=&r" (cr0) : "r" (from) : "ax");
25319
25320 for ( ; i > 5; i--) {
25321 __asm__ __volatile__ (
25322- "1: prefetch 320(%0)\n"
25323- "2: movq (%0), %%mm0\n"
25324- " movq 8(%0), %%mm1\n"
25325- " movq 16(%0), %%mm2\n"
25326- " movq 24(%0), %%mm3\n"
25327- " movq %%mm0, (%1)\n"
25328- " movq %%mm1, 8(%1)\n"
25329- " movq %%mm2, 16(%1)\n"
25330- " movq %%mm3, 24(%1)\n"
25331- " movq 32(%0), %%mm0\n"
25332- " movq 40(%0), %%mm1\n"
25333- " movq 48(%0), %%mm2\n"
25334- " movq 56(%0), %%mm3\n"
25335- " movq %%mm0, 32(%1)\n"
25336- " movq %%mm1, 40(%1)\n"
25337- " movq %%mm2, 48(%1)\n"
25338- " movq %%mm3, 56(%1)\n"
25339+ "1: prefetch 320(%1)\n"
25340+ "2: movq (%1), %%mm0\n"
25341+ " movq 8(%1), %%mm1\n"
25342+ " movq 16(%1), %%mm2\n"
25343+ " movq 24(%1), %%mm3\n"
25344+ " movq %%mm0, (%2)\n"
25345+ " movq %%mm1, 8(%2)\n"
25346+ " movq %%mm2, 16(%2)\n"
25347+ " movq %%mm3, 24(%2)\n"
25348+ " movq 32(%1), %%mm0\n"
25349+ " movq 40(%1), %%mm1\n"
25350+ " movq 48(%1), %%mm2\n"
25351+ " movq 56(%1), %%mm3\n"
25352+ " movq %%mm0, 32(%2)\n"
25353+ " movq %%mm1, 40(%2)\n"
25354+ " movq %%mm2, 48(%2)\n"
25355+ " movq %%mm3, 56(%2)\n"
25356 ".section .fixup, \"ax\"\n"
25357- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25358+ "3:\n"
25359+
25360+#ifdef CONFIG_PAX_KERNEXEC
25361+ " movl %%cr0, %0\n"
25362+ " movl %0, %%eax\n"
25363+ " andl $0xFFFEFFFF, %%eax\n"
25364+ " movl %%eax, %%cr0\n"
25365+#endif
25366+
25367+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25368+
25369+#ifdef CONFIG_PAX_KERNEXEC
25370+ " movl %0, %%cr0\n"
25371+#endif
25372+
25373 " jmp 2b\n"
25374 ".previous\n"
25375 _ASM_EXTABLE(1b, 3b)
25376- : : "r" (from), "r" (to) : "memory");
25377+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
25378
25379 from += 64;
25380 to += 64;
25381@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
25382 static void fast_copy_page(void *to, void *from)
25383 {
25384 int i;
25385+ unsigned long cr0;
25386
25387 kernel_fpu_begin();
25388
25389@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
25390 * but that is for later. -AV
25391 */
25392 __asm__ __volatile__(
25393- "1: prefetch (%0)\n"
25394- " prefetch 64(%0)\n"
25395- " prefetch 128(%0)\n"
25396- " prefetch 192(%0)\n"
25397- " prefetch 256(%0)\n"
25398+ "1: prefetch (%1)\n"
25399+ " prefetch 64(%1)\n"
25400+ " prefetch 128(%1)\n"
25401+ " prefetch 192(%1)\n"
25402+ " prefetch 256(%1)\n"
25403 "2: \n"
25404 ".section .fixup, \"ax\"\n"
25405- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25406+ "3: \n"
25407+
25408+#ifdef CONFIG_PAX_KERNEXEC
25409+ " movl %%cr0, %0\n"
25410+ " movl %0, %%eax\n"
25411+ " andl $0xFFFEFFFF, %%eax\n"
25412+ " movl %%eax, %%cr0\n"
25413+#endif
25414+
25415+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25416+
25417+#ifdef CONFIG_PAX_KERNEXEC
25418+ " movl %0, %%cr0\n"
25419+#endif
25420+
25421 " jmp 2b\n"
25422 ".previous\n"
25423- _ASM_EXTABLE(1b, 3b) : : "r" (from));
25424+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
25425
25426 for (i = 0; i < (4096-320)/64; i++) {
25427 __asm__ __volatile__ (
25428- "1: prefetch 320(%0)\n"
25429- "2: movq (%0), %%mm0\n"
25430- " movntq %%mm0, (%1)\n"
25431- " movq 8(%0), %%mm1\n"
25432- " movntq %%mm1, 8(%1)\n"
25433- " movq 16(%0), %%mm2\n"
25434- " movntq %%mm2, 16(%1)\n"
25435- " movq 24(%0), %%mm3\n"
25436- " movntq %%mm3, 24(%1)\n"
25437- " movq 32(%0), %%mm4\n"
25438- " movntq %%mm4, 32(%1)\n"
25439- " movq 40(%0), %%mm5\n"
25440- " movntq %%mm5, 40(%1)\n"
25441- " movq 48(%0), %%mm6\n"
25442- " movntq %%mm6, 48(%1)\n"
25443- " movq 56(%0), %%mm7\n"
25444- " movntq %%mm7, 56(%1)\n"
25445+ "1: prefetch 320(%1)\n"
25446+ "2: movq (%1), %%mm0\n"
25447+ " movntq %%mm0, (%2)\n"
25448+ " movq 8(%1), %%mm1\n"
25449+ " movntq %%mm1, 8(%2)\n"
25450+ " movq 16(%1), %%mm2\n"
25451+ " movntq %%mm2, 16(%2)\n"
25452+ " movq 24(%1), %%mm3\n"
25453+ " movntq %%mm3, 24(%2)\n"
25454+ " movq 32(%1), %%mm4\n"
25455+ " movntq %%mm4, 32(%2)\n"
25456+ " movq 40(%1), %%mm5\n"
25457+ " movntq %%mm5, 40(%2)\n"
25458+ " movq 48(%1), %%mm6\n"
25459+ " movntq %%mm6, 48(%2)\n"
25460+ " movq 56(%1), %%mm7\n"
25461+ " movntq %%mm7, 56(%2)\n"
25462 ".section .fixup, \"ax\"\n"
25463- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25464+ "3:\n"
25465+
25466+#ifdef CONFIG_PAX_KERNEXEC
25467+ " movl %%cr0, %0\n"
25468+ " movl %0, %%eax\n"
25469+ " andl $0xFFFEFFFF, %%eax\n"
25470+ " movl %%eax, %%cr0\n"
25471+#endif
25472+
25473+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25474+
25475+#ifdef CONFIG_PAX_KERNEXEC
25476+ " movl %0, %%cr0\n"
25477+#endif
25478+
25479 " jmp 2b\n"
25480 ".previous\n"
25481- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
25482+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
25483
25484 from += 64;
25485 to += 64;
25486@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
25487 static void fast_copy_page(void *to, void *from)
25488 {
25489 int i;
25490+ unsigned long cr0;
25491
25492 kernel_fpu_begin();
25493
25494 __asm__ __volatile__ (
25495- "1: prefetch (%0)\n"
25496- " prefetch 64(%0)\n"
25497- " prefetch 128(%0)\n"
25498- " prefetch 192(%0)\n"
25499- " prefetch 256(%0)\n"
25500+ "1: prefetch (%1)\n"
25501+ " prefetch 64(%1)\n"
25502+ " prefetch 128(%1)\n"
25503+ " prefetch 192(%1)\n"
25504+ " prefetch 256(%1)\n"
25505 "2: \n"
25506 ".section .fixup, \"ax\"\n"
25507- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25508+ "3: \n"
25509+
25510+#ifdef CONFIG_PAX_KERNEXEC
25511+ " movl %%cr0, %0\n"
25512+ " movl %0, %%eax\n"
25513+ " andl $0xFFFEFFFF, %%eax\n"
25514+ " movl %%eax, %%cr0\n"
25515+#endif
25516+
25517+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25518+
25519+#ifdef CONFIG_PAX_KERNEXEC
25520+ " movl %0, %%cr0\n"
25521+#endif
25522+
25523 " jmp 2b\n"
25524 ".previous\n"
25525- _ASM_EXTABLE(1b, 3b) : : "r" (from));
25526+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
25527
25528 for (i = 0; i < 4096/64; i++) {
25529 __asm__ __volatile__ (
25530- "1: prefetch 320(%0)\n"
25531- "2: movq (%0), %%mm0\n"
25532- " movq 8(%0), %%mm1\n"
25533- " movq 16(%0), %%mm2\n"
25534- " movq 24(%0), %%mm3\n"
25535- " movq %%mm0, (%1)\n"
25536- " movq %%mm1, 8(%1)\n"
25537- " movq %%mm2, 16(%1)\n"
25538- " movq %%mm3, 24(%1)\n"
25539- " movq 32(%0), %%mm0\n"
25540- " movq 40(%0), %%mm1\n"
25541- " movq 48(%0), %%mm2\n"
25542- " movq 56(%0), %%mm3\n"
25543- " movq %%mm0, 32(%1)\n"
25544- " movq %%mm1, 40(%1)\n"
25545- " movq %%mm2, 48(%1)\n"
25546- " movq %%mm3, 56(%1)\n"
25547+ "1: prefetch 320(%1)\n"
25548+ "2: movq (%1), %%mm0\n"
25549+ " movq 8(%1), %%mm1\n"
25550+ " movq 16(%1), %%mm2\n"
25551+ " movq 24(%1), %%mm3\n"
25552+ " movq %%mm0, (%2)\n"
25553+ " movq %%mm1, 8(%2)\n"
25554+ " movq %%mm2, 16(%2)\n"
25555+ " movq %%mm3, 24(%2)\n"
25556+ " movq 32(%1), %%mm0\n"
25557+ " movq 40(%1), %%mm1\n"
25558+ " movq 48(%1), %%mm2\n"
25559+ " movq 56(%1), %%mm3\n"
25560+ " movq %%mm0, 32(%2)\n"
25561+ " movq %%mm1, 40(%2)\n"
25562+ " movq %%mm2, 48(%2)\n"
25563+ " movq %%mm3, 56(%2)\n"
25564 ".section .fixup, \"ax\"\n"
25565- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25566+ "3:\n"
25567+
25568+#ifdef CONFIG_PAX_KERNEXEC
25569+ " movl %%cr0, %0\n"
25570+ " movl %0, %%eax\n"
25571+ " andl $0xFFFEFFFF, %%eax\n"
25572+ " movl %%eax, %%cr0\n"
25573+#endif
25574+
25575+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25576+
25577+#ifdef CONFIG_PAX_KERNEXEC
25578+ " movl %0, %%cr0\n"
25579+#endif
25580+
25581 " jmp 2b\n"
25582 ".previous\n"
25583 _ASM_EXTABLE(1b, 3b)
25584- : : "r" (from), "r" (to) : "memory");
25585+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
25586
25587 from += 64;
25588 to += 64;
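
[Note on the MMX page-helper hunks above (fast_clear_page/fast_copy_page): the .fixup code is self-modifying, it overwrites the faulting prefetch at label 1 with a short jmp ("movw $0x1AEB, 1b" stores EB 1A, a jmp over 26 bytes; 0x05EB stores EB 05, a jmp over 5 bytes). Under CONFIG_PAX_KERNEXEC kernel text is read-only, so the store is bracketed by clearing and restoring CR0.WP, which is also why each asm gains a "=&r" (cr0) output and an "ax" clobber. A minimal C sketch of that bracket; the helper names are invented here for illustration:]

	static inline unsigned long kernexec_open_sketch(void)
	{
		unsigned long cr0;

		asm volatile("movl %%cr0, %0" : "=r" (cr0));
		/* clear CR0.WP (bit 16, the 0xFFFEFFFF mask above) so a
		 * ring-0 store to read-only kernel text is permitted */
		asm volatile("movl %0, %%cr0" : : "r" (cr0 & ~0x10000UL));
		return cr0;
	}

	static inline void kernexec_close_sketch(unsigned long cr0)
	{
		/* write back the saved CR0, re-arming write protection */
		asm volatile("movl %0, %%cr0" : : "r" (cr0));
	}
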
25589diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
25590index f6d13ee..aca5f0b 100644
25591--- a/arch/x86/lib/msr-reg.S
25592+++ b/arch/x86/lib/msr-reg.S
25593@@ -3,6 +3,7 @@
25594 #include <asm/dwarf2.h>
25595 #include <asm/asm.h>
25596 #include <asm/msr.h>
25597+#include <asm/alternative-asm.h>
25598
25599 #ifdef CONFIG_X86_64
25600 /*
25601@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
25602 CFI_STARTPROC
25603 pushq_cfi %rbx
25604 pushq_cfi %rbp
25605- movq %rdi, %r10 /* Save pointer */
25606+ movq %rdi, %r9 /* Save pointer */
25607 xorl %r11d, %r11d /* Return value */
25608 movl (%rdi), %eax
25609 movl 4(%rdi), %ecx
25610@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
25611 movl 28(%rdi), %edi
25612 CFI_REMEMBER_STATE
25613 1: \op
25614-2: movl %eax, (%r10)
25615+2: movl %eax, (%r9)
25616 movl %r11d, %eax /* Return value */
25617- movl %ecx, 4(%r10)
25618- movl %edx, 8(%r10)
25619- movl %ebx, 12(%r10)
25620- movl %ebp, 20(%r10)
25621- movl %esi, 24(%r10)
25622- movl %edi, 28(%r10)
25623+ movl %ecx, 4(%r9)
25624+ movl %edx, 8(%r9)
25625+ movl %ebx, 12(%r9)
25626+ movl %ebp, 20(%r9)
25627+ movl %esi, 24(%r9)
25628+ movl %edi, 28(%r9)
25629 popq_cfi %rbp
25630 popq_cfi %rbx
25631+ pax_force_retaddr
25632 ret
25633 3:
25634 CFI_RESTORE_STATE
25635diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
25636index fc6ba17..04471c5 100644
25637--- a/arch/x86/lib/putuser.S
25638+++ b/arch/x86/lib/putuser.S
25639@@ -16,7 +16,9 @@
25640 #include <asm/errno.h>
25641 #include <asm/asm.h>
25642 #include <asm/smap.h>
25643-
25644+#include <asm/segment.h>
25645+#include <asm/pgtable.h>
25646+#include <asm/alternative-asm.h>
25647
25648 /*
25649 * __put_user_X
25650@@ -30,57 +32,125 @@
25651 * as they get called from within inline assembly.
25652 */
25653
25654-#define ENTER CFI_STARTPROC ; \
25655- GET_THREAD_INFO(%_ASM_BX)
25656-#define EXIT ASM_CLAC ; \
25657- ret ; \
25658+#define ENTER CFI_STARTPROC
25659+#define EXIT ASM_CLAC ; \
25660+ pax_force_retaddr ; \
25661+ ret ; \
25662 CFI_ENDPROC
25663
25664+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25665+#define _DEST %_ASM_CX,%_ASM_BX
25666+#else
25667+#define _DEST %_ASM_CX
25668+#endif
25669+
25670+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
25671+#define __copyuser_seg gs;
25672+#else
25673+#define __copyuser_seg
25674+#endif
25675+
25676 .text
25677 ENTRY(__put_user_1)
25678 ENTER
25679+
25680+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25681+ GET_THREAD_INFO(%_ASM_BX)
25682 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
25683 jae bad_put_user
25684 ASM_STAC
25685-1: movb %al,(%_ASM_CX)
25686+
25687+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25688+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
25689+ cmp %_ASM_BX,%_ASM_CX
25690+ jb 1234f
25691+ xor %ebx,%ebx
25692+1234:
25693+#endif
25694+
25695+#endif
25696+
25697+1: __copyuser_seg movb %al,(_DEST)
25698 xor %eax,%eax
25699 EXIT
25700 ENDPROC(__put_user_1)
25701
25702 ENTRY(__put_user_2)
25703 ENTER
25704+
25705+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25706+ GET_THREAD_INFO(%_ASM_BX)
25707 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
25708 sub $1,%_ASM_BX
25709 cmp %_ASM_BX,%_ASM_CX
25710 jae bad_put_user
25711 ASM_STAC
25712-2: movw %ax,(%_ASM_CX)
25713+
25714+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25715+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
25716+ cmp %_ASM_BX,%_ASM_CX
25717+ jb 1234f
25718+ xor %ebx,%ebx
25719+1234:
25720+#endif
25721+
25722+#endif
25723+
25724+2: __copyuser_seg movw %ax,(_DEST)
25725 xor %eax,%eax
25726 EXIT
25727 ENDPROC(__put_user_2)
25728
25729 ENTRY(__put_user_4)
25730 ENTER
25731+
25732+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25733+ GET_THREAD_INFO(%_ASM_BX)
25734 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
25735 sub $3,%_ASM_BX
25736 cmp %_ASM_BX,%_ASM_CX
25737 jae bad_put_user
25738 ASM_STAC
25739-3: movl %eax,(%_ASM_CX)
25740+
25741+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25742+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
25743+ cmp %_ASM_BX,%_ASM_CX
25744+ jb 1234f
25745+ xor %ebx,%ebx
25746+1234:
25747+#endif
25748+
25749+#endif
25750+
25751+3: __copyuser_seg movl %eax,(_DEST)
25752 xor %eax,%eax
25753 EXIT
25754 ENDPROC(__put_user_4)
25755
25756 ENTRY(__put_user_8)
25757 ENTER
25758+
25759+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25760+ GET_THREAD_INFO(%_ASM_BX)
25761 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
25762 sub $7,%_ASM_BX
25763 cmp %_ASM_BX,%_ASM_CX
25764 jae bad_put_user
25765 ASM_STAC
25766-4: mov %_ASM_AX,(%_ASM_CX)
25767+
25768+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25769+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
25770+ cmp %_ASM_BX,%_ASM_CX
25771+ jb 1234f
25772+ xor %ebx,%ebx
25773+1234:
25774+#endif
25775+
25776+#endif
25777+
25778+4: __copyuser_seg mov %_ASM_AX,(_DEST)
25779 #ifdef CONFIG_X86_32
25780-5: movl %edx,4(%_ASM_CX)
25781+5: __copyuser_seg movl %edx,4(_DEST)
25782 #endif
25783 xor %eax,%eax
25784 EXIT
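
[Note on the putuser.S hunks above: under amd64 CONFIG_PAX_MEMORY_UDEREF the destination becomes (%_ASM_CX,%_ASM_BX), where %_ASM_BX holds PAX_USER_SHADOW_BASE for ordinary user pointers and 0 for pointers already inside the shadow area, so every __put_user store is redirected into the shadow mapping of userland. On i386 UDEREF the addr_limit check is compiled out entirely because the gs-prefixed access (__copyuser_seg) enforces the boundary in hardware. Equivalent C for the address rewrite; the function name is invented here:]

	static inline unsigned long pax_uderef_dest_sketch(unsigned long uaddr)
	{
		unsigned long index = PAX_USER_SHADOW_BASE;	/* mov $PAX_USER_SHADOW_BASE,%_ASM_BX */

		if (uaddr >= PAX_USER_SHADOW_BASE)		/* cmp %_ASM_BX,%_ASM_CX; jb 1234f */
			index = 0;				/* xor %ebx,%ebx */
		return uaddr + index;				/* effective address (%_ASM_CX,%_ASM_BX) */
	}
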
25785diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
25786index 1cad221..de671ee 100644
25787--- a/arch/x86/lib/rwlock.S
25788+++ b/arch/x86/lib/rwlock.S
25789@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
25790 FRAME
25791 0: LOCK_PREFIX
25792 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
25793+
25794+#ifdef CONFIG_PAX_REFCOUNT
25795+ jno 1234f
25796+ LOCK_PREFIX
25797+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
25798+ int $4
25799+1234:
25800+ _ASM_EXTABLE(1234b, 1234b)
25801+#endif
25802+
25803 1: rep; nop
25804 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
25805 jne 1b
25806 LOCK_PREFIX
25807 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
25808+
25809+#ifdef CONFIG_PAX_REFCOUNT
25810+ jno 1234f
25811+ LOCK_PREFIX
25812+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
25813+ int $4
25814+1234:
25815+ _ASM_EXTABLE(1234b, 1234b)
25816+#endif
25817+
25818 jnz 0b
25819 ENDFRAME
25820+ pax_force_retaddr
25821 ret
25822 CFI_ENDPROC
25823 END(__write_lock_failed)
25824@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
25825 FRAME
25826 0: LOCK_PREFIX
25827 READ_LOCK_SIZE(inc) (%__lock_ptr)
25828+
25829+#ifdef CONFIG_PAX_REFCOUNT
25830+ jno 1234f
25831+ LOCK_PREFIX
25832+ READ_LOCK_SIZE(dec) (%__lock_ptr)
25833+ int $4
25834+1234:
25835+ _ASM_EXTABLE(1234b, 1234b)
25836+#endif
25837+
25838 1: rep; nop
25839 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
25840 js 1b
25841 LOCK_PREFIX
25842 READ_LOCK_SIZE(dec) (%__lock_ptr)
25843+
25844+#ifdef CONFIG_PAX_REFCOUNT
25845+ jno 1234f
25846+ LOCK_PREFIX
25847+ READ_LOCK_SIZE(inc) (%__lock_ptr)
25848+ int $4
25849+1234:
25850+ _ASM_EXTABLE(1234b, 1234b)
25851+#endif
25852+
25853 js 0b
25854 ENDFRAME
25855+ pax_force_retaddr
25856 ret
25857 CFI_ENDPROC
25858 END(__read_lock_failed)
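
[Note on the rwlock.S hunks above: every CONFIG_PAX_REFCOUNT insertion follows one pattern, do the locked arithmetic, and if the signed result overflowed (the jno is not taken) undo the operation and raise int $4 (#OF); the self-referencing _ASM_EXTABLE(1234b, 1234b) entry lets the trap handler resume at the same spot after reporting. A standalone sketch of the same pattern:]

	/* Minimal sketch of the PAX_REFCOUNT overflow check; handling of
	 * the #OF trap itself is left to the kernel's trap path. */
	static inline void refcount_inc_check_sketch(int *v)
	{
		asm volatile("lock incl %0\n\t"
			     "jno 1f\n\t"		/* no signed overflow: done */
			     "lock decl %0\n\t"		/* roll the increment back */
			     "int $4\n\t"		/* raise #OF to report it */
			     "1:\n"
			     : "+m" (*v)
			     :
			     : "memory", "cc");
	}
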
25859diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
25860index 5dff5f0..cadebf4 100644
25861--- a/arch/x86/lib/rwsem.S
25862+++ b/arch/x86/lib/rwsem.S
25863@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
25864 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
25865 CFI_RESTORE __ASM_REG(dx)
25866 restore_common_regs
25867+ pax_force_retaddr
25868 ret
25869 CFI_ENDPROC
25870 ENDPROC(call_rwsem_down_read_failed)
25871@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
25872 movq %rax,%rdi
25873 call rwsem_down_write_failed
25874 restore_common_regs
25875+ pax_force_retaddr
25876 ret
25877 CFI_ENDPROC
25878 ENDPROC(call_rwsem_down_write_failed)
25879@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
25880 movq %rax,%rdi
25881 call rwsem_wake
25882 restore_common_regs
25883-1: ret
25884+1: pax_force_retaddr
25885+ ret
25886 CFI_ENDPROC
25887 ENDPROC(call_rwsem_wake)
25888
25889@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
25890 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
25891 CFI_RESTORE __ASM_REG(dx)
25892 restore_common_regs
25893+ pax_force_retaddr
25894 ret
25895 CFI_ENDPROC
25896 ENDPROC(call_rwsem_downgrade_wake)
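
[Note on pax_force_retaddr, inserted before each ret in the rwsem.S, msr-reg.S and thunk_64.S stubs: the macro comes from asm/alternative-asm.h in this patch and covers hand-written assembly that the KERNEXEC gcc plugin cannot instrument. In the plugin's "bts" mode it reportedly pins the saved return address into the kernel half of the address space by setting its top bit, so a corrupted return target pointing at userland faults instead of executing; the msr-reg.S switch from %r10 to %r9 plausibly frees %r10 for the same machinery, though the patch does not say so. The macro body is not shown in this section, so the following is only an assumed rendering of the effect:]

	/* ASSUMPTION: presumed "bts" behaviour of pax_force_retaddr, not the
	 * macro's actual definition (which lives elsewhere in this patch). */
	static inline void force_retaddr_sketch(unsigned long *retaddr_slot)
	{
		*retaddr_slot |= 1UL << 63;	/* btsq $63,(%rsp): force a kernel-half target */
	}
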
25897diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
25898index a63efd6..ccecad8 100644
25899--- a/arch/x86/lib/thunk_64.S
25900+++ b/arch/x86/lib/thunk_64.S
25901@@ -8,6 +8,7 @@
25902 #include <linux/linkage.h>
25903 #include <asm/dwarf2.h>
25904 #include <asm/calling.h>
25905+#include <asm/alternative-asm.h>
25906
25907 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
25908 .macro THUNK name, func, put_ret_addr_in_rdi=0
25909@@ -41,5 +42,6 @@
25910 SAVE_ARGS
25911 restore:
25912 RESTORE_ARGS
25913+ pax_force_retaddr
25914 ret
25915 CFI_ENDPROC
25916diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
25917index f0312d7..9c39d63 100644
25918--- a/arch/x86/lib/usercopy_32.c
25919+++ b/arch/x86/lib/usercopy_32.c
25920@@ -42,11 +42,13 @@ do { \
25921 int __d0; \
25922 might_fault(); \
25923 __asm__ __volatile__( \
25924+ __COPYUSER_SET_ES \
25925 ASM_STAC "\n" \
25926 "0: rep; stosl\n" \
25927 " movl %2,%0\n" \
25928 "1: rep; stosb\n" \
25929 "2: " ASM_CLAC "\n" \
25930+ __COPYUSER_RESTORE_ES \
25931 ".section .fixup,\"ax\"\n" \
25932 "3: lea 0(%2,%0,4),%0\n" \
25933 " jmp 2b\n" \
25934@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
25935
25936 #ifdef CONFIG_X86_INTEL_USERCOPY
25937 static unsigned long
25938-__copy_user_intel(void __user *to, const void *from, unsigned long size)
25939+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
25940 {
25941 int d0, d1;
25942 __asm__ __volatile__(
25943@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
25944 " .align 2,0x90\n"
25945 "3: movl 0(%4), %%eax\n"
25946 "4: movl 4(%4), %%edx\n"
25947- "5: movl %%eax, 0(%3)\n"
25948- "6: movl %%edx, 4(%3)\n"
25949+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
25950+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
25951 "7: movl 8(%4), %%eax\n"
25952 "8: movl 12(%4),%%edx\n"
25953- "9: movl %%eax, 8(%3)\n"
25954- "10: movl %%edx, 12(%3)\n"
25955+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
25956+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
25957 "11: movl 16(%4), %%eax\n"
25958 "12: movl 20(%4), %%edx\n"
25959- "13: movl %%eax, 16(%3)\n"
25960- "14: movl %%edx, 20(%3)\n"
25961+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
25962+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
25963 "15: movl 24(%4), %%eax\n"
25964 "16: movl 28(%4), %%edx\n"
25965- "17: movl %%eax, 24(%3)\n"
25966- "18: movl %%edx, 28(%3)\n"
25967+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
25968+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
25969 "19: movl 32(%4), %%eax\n"
25970 "20: movl 36(%4), %%edx\n"
25971- "21: movl %%eax, 32(%3)\n"
25972- "22: movl %%edx, 36(%3)\n"
25973+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
25974+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
25975 "23: movl 40(%4), %%eax\n"
25976 "24: movl 44(%4), %%edx\n"
25977- "25: movl %%eax, 40(%3)\n"
25978- "26: movl %%edx, 44(%3)\n"
25979+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
25980+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
25981 "27: movl 48(%4), %%eax\n"
25982 "28: movl 52(%4), %%edx\n"
25983- "29: movl %%eax, 48(%3)\n"
25984- "30: movl %%edx, 52(%3)\n"
25985+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
25986+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
25987 "31: movl 56(%4), %%eax\n"
25988 "32: movl 60(%4), %%edx\n"
25989- "33: movl %%eax, 56(%3)\n"
25990- "34: movl %%edx, 60(%3)\n"
25991+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
25992+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
25993 " addl $-64, %0\n"
25994 " addl $64, %4\n"
25995 " addl $64, %3\n"
25996@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
25997 " shrl $2, %0\n"
25998 " andl $3, %%eax\n"
25999 " cld\n"
26000+ __COPYUSER_SET_ES
26001 "99: rep; movsl\n"
26002 "36: movl %%eax, %0\n"
26003 "37: rep; movsb\n"
26004 "100:\n"
26005+ __COPYUSER_RESTORE_ES
26006 ".section .fixup,\"ax\"\n"
26007 "101: lea 0(%%eax,%0,4),%0\n"
26008 " jmp 100b\n"
26009@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26010 }
26011
26012 static unsigned long
26013+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
26014+{
26015+ int d0, d1;
26016+ __asm__ __volatile__(
26017+ " .align 2,0x90\n"
26018+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
26019+ " cmpl $67, %0\n"
26020+ " jbe 3f\n"
26021+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
26022+ " .align 2,0x90\n"
26023+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
26024+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
26025+ "5: movl %%eax, 0(%3)\n"
26026+ "6: movl %%edx, 4(%3)\n"
26027+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
26028+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
26029+ "9: movl %%eax, 8(%3)\n"
26030+ "10: movl %%edx, 12(%3)\n"
26031+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
26032+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
26033+ "13: movl %%eax, 16(%3)\n"
26034+ "14: movl %%edx, 20(%3)\n"
26035+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
26036+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
26037+ "17: movl %%eax, 24(%3)\n"
26038+ "18: movl %%edx, 28(%3)\n"
26039+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
26040+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
26041+ "21: movl %%eax, 32(%3)\n"
26042+ "22: movl %%edx, 36(%3)\n"
26043+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
26044+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
26045+ "25: movl %%eax, 40(%3)\n"
26046+ "26: movl %%edx, 44(%3)\n"
26047+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
26048+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
26049+ "29: movl %%eax, 48(%3)\n"
26050+ "30: movl %%edx, 52(%3)\n"
26051+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
26052+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
26053+ "33: movl %%eax, 56(%3)\n"
26054+ "34: movl %%edx, 60(%3)\n"
26055+ " addl $-64, %0\n"
26056+ " addl $64, %4\n"
26057+ " addl $64, %3\n"
26058+ " cmpl $63, %0\n"
26059+ " ja 1b\n"
26060+ "35: movl %0, %%eax\n"
26061+ " shrl $2, %0\n"
26062+ " andl $3, %%eax\n"
26063+ " cld\n"
26064+ "99: rep; "__copyuser_seg" movsl\n"
26065+ "36: movl %%eax, %0\n"
26066+ "37: rep; "__copyuser_seg" movsb\n"
26067+ "100:\n"
26068+ ".section .fixup,\"ax\"\n"
26069+ "101: lea 0(%%eax,%0,4),%0\n"
26070+ " jmp 100b\n"
26071+ ".previous\n"
26072+ _ASM_EXTABLE(1b,100b)
26073+ _ASM_EXTABLE(2b,100b)
26074+ _ASM_EXTABLE(3b,100b)
26075+ _ASM_EXTABLE(4b,100b)
26076+ _ASM_EXTABLE(5b,100b)
26077+ _ASM_EXTABLE(6b,100b)
26078+ _ASM_EXTABLE(7b,100b)
26079+ _ASM_EXTABLE(8b,100b)
26080+ _ASM_EXTABLE(9b,100b)
26081+ _ASM_EXTABLE(10b,100b)
26082+ _ASM_EXTABLE(11b,100b)
26083+ _ASM_EXTABLE(12b,100b)
26084+ _ASM_EXTABLE(13b,100b)
26085+ _ASM_EXTABLE(14b,100b)
26086+ _ASM_EXTABLE(15b,100b)
26087+ _ASM_EXTABLE(16b,100b)
26088+ _ASM_EXTABLE(17b,100b)
26089+ _ASM_EXTABLE(18b,100b)
26090+ _ASM_EXTABLE(19b,100b)
26091+ _ASM_EXTABLE(20b,100b)
26092+ _ASM_EXTABLE(21b,100b)
26093+ _ASM_EXTABLE(22b,100b)
26094+ _ASM_EXTABLE(23b,100b)
26095+ _ASM_EXTABLE(24b,100b)
26096+ _ASM_EXTABLE(25b,100b)
26097+ _ASM_EXTABLE(26b,100b)
26098+ _ASM_EXTABLE(27b,100b)
26099+ _ASM_EXTABLE(28b,100b)
26100+ _ASM_EXTABLE(29b,100b)
26101+ _ASM_EXTABLE(30b,100b)
26102+ _ASM_EXTABLE(31b,100b)
26103+ _ASM_EXTABLE(32b,100b)
26104+ _ASM_EXTABLE(33b,100b)
26105+ _ASM_EXTABLE(34b,100b)
26106+ _ASM_EXTABLE(35b,100b)
26107+ _ASM_EXTABLE(36b,100b)
26108+ _ASM_EXTABLE(37b,100b)
26109+ _ASM_EXTABLE(99b,101b)
26110+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
26111+ : "1"(to), "2"(from), "0"(size)
26112+ : "eax", "edx", "memory");
26113+ return size;
26114+}
26115+
26116+static unsigned long __size_overflow(3)
26117 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26118 {
26119 int d0, d1;
26120 __asm__ __volatile__(
26121 " .align 2,0x90\n"
26122- "0: movl 32(%4), %%eax\n"
26123+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26124 " cmpl $67, %0\n"
26125 " jbe 2f\n"
26126- "1: movl 64(%4), %%eax\n"
26127+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26128 " .align 2,0x90\n"
26129- "2: movl 0(%4), %%eax\n"
26130- "21: movl 4(%4), %%edx\n"
26131+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26132+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26133 " movl %%eax, 0(%3)\n"
26134 " movl %%edx, 4(%3)\n"
26135- "3: movl 8(%4), %%eax\n"
26136- "31: movl 12(%4),%%edx\n"
26137+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26138+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26139 " movl %%eax, 8(%3)\n"
26140 " movl %%edx, 12(%3)\n"
26141- "4: movl 16(%4), %%eax\n"
26142- "41: movl 20(%4), %%edx\n"
26143+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26144+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26145 " movl %%eax, 16(%3)\n"
26146 " movl %%edx, 20(%3)\n"
26147- "10: movl 24(%4), %%eax\n"
26148- "51: movl 28(%4), %%edx\n"
26149+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26150+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26151 " movl %%eax, 24(%3)\n"
26152 " movl %%edx, 28(%3)\n"
26153- "11: movl 32(%4), %%eax\n"
26154- "61: movl 36(%4), %%edx\n"
26155+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26156+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26157 " movl %%eax, 32(%3)\n"
26158 " movl %%edx, 36(%3)\n"
26159- "12: movl 40(%4), %%eax\n"
26160- "71: movl 44(%4), %%edx\n"
26161+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26162+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26163 " movl %%eax, 40(%3)\n"
26164 " movl %%edx, 44(%3)\n"
26165- "13: movl 48(%4), %%eax\n"
26166- "81: movl 52(%4), %%edx\n"
26167+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26168+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26169 " movl %%eax, 48(%3)\n"
26170 " movl %%edx, 52(%3)\n"
26171- "14: movl 56(%4), %%eax\n"
26172- "91: movl 60(%4), %%edx\n"
26173+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26174+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26175 " movl %%eax, 56(%3)\n"
26176 " movl %%edx, 60(%3)\n"
26177 " addl $-64, %0\n"
26178@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26179 " shrl $2, %0\n"
26180 " andl $3, %%eax\n"
26181 " cld\n"
26182- "6: rep; movsl\n"
26183+ "6: rep; "__copyuser_seg" movsl\n"
26184 " movl %%eax,%0\n"
26185- "7: rep; movsb\n"
26186+ "7: rep; "__copyuser_seg" movsb\n"
26187 "8:\n"
26188 ".section .fixup,\"ax\"\n"
26189 "9: lea 0(%%eax,%0,4),%0\n"
26190@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26191 * hyoshiok@miraclelinux.com
26192 */
26193
26194-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26195+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
26196 const void __user *from, unsigned long size)
26197 {
26198 int d0, d1;
26199
26200 __asm__ __volatile__(
26201 " .align 2,0x90\n"
26202- "0: movl 32(%4), %%eax\n"
26203+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26204 " cmpl $67, %0\n"
26205 " jbe 2f\n"
26206- "1: movl 64(%4), %%eax\n"
26207+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26208 " .align 2,0x90\n"
26209- "2: movl 0(%4), %%eax\n"
26210- "21: movl 4(%4), %%edx\n"
26211+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26212+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26213 " movnti %%eax, 0(%3)\n"
26214 " movnti %%edx, 4(%3)\n"
26215- "3: movl 8(%4), %%eax\n"
26216- "31: movl 12(%4),%%edx\n"
26217+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26218+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26219 " movnti %%eax, 8(%3)\n"
26220 " movnti %%edx, 12(%3)\n"
26221- "4: movl 16(%4), %%eax\n"
26222- "41: movl 20(%4), %%edx\n"
26223+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26224+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26225 " movnti %%eax, 16(%3)\n"
26226 " movnti %%edx, 20(%3)\n"
26227- "10: movl 24(%4), %%eax\n"
26228- "51: movl 28(%4), %%edx\n"
26229+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26230+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26231 " movnti %%eax, 24(%3)\n"
26232 " movnti %%edx, 28(%3)\n"
26233- "11: movl 32(%4), %%eax\n"
26234- "61: movl 36(%4), %%edx\n"
26235+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26236+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26237 " movnti %%eax, 32(%3)\n"
26238 " movnti %%edx, 36(%3)\n"
26239- "12: movl 40(%4), %%eax\n"
26240- "71: movl 44(%4), %%edx\n"
26241+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26242+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26243 " movnti %%eax, 40(%3)\n"
26244 " movnti %%edx, 44(%3)\n"
26245- "13: movl 48(%4), %%eax\n"
26246- "81: movl 52(%4), %%edx\n"
26247+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26248+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26249 " movnti %%eax, 48(%3)\n"
26250 " movnti %%edx, 52(%3)\n"
26251- "14: movl 56(%4), %%eax\n"
26252- "91: movl 60(%4), %%edx\n"
26253+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26254+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26255 " movnti %%eax, 56(%3)\n"
26256 " movnti %%edx, 60(%3)\n"
26257 " addl $-64, %0\n"
26258@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26259 " shrl $2, %0\n"
26260 " andl $3, %%eax\n"
26261 " cld\n"
26262- "6: rep; movsl\n"
26263+ "6: rep; "__copyuser_seg" movsl\n"
26264 " movl %%eax,%0\n"
26265- "7: rep; movsb\n"
26266+ "7: rep; "__copyuser_seg" movsb\n"
26267 "8:\n"
26268 ".section .fixup,\"ax\"\n"
26269 "9: lea 0(%%eax,%0,4),%0\n"
26270@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26271 return size;
26272 }
26273
26274-static unsigned long __copy_user_intel_nocache(void *to,
26275+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
26276 const void __user *from, unsigned long size)
26277 {
26278 int d0, d1;
26279
26280 __asm__ __volatile__(
26281 " .align 2,0x90\n"
26282- "0: movl 32(%4), %%eax\n"
26283+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26284 " cmpl $67, %0\n"
26285 " jbe 2f\n"
26286- "1: movl 64(%4), %%eax\n"
26287+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26288 " .align 2,0x90\n"
26289- "2: movl 0(%4), %%eax\n"
26290- "21: movl 4(%4), %%edx\n"
26291+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26292+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26293 " movnti %%eax, 0(%3)\n"
26294 " movnti %%edx, 4(%3)\n"
26295- "3: movl 8(%4), %%eax\n"
26296- "31: movl 12(%4),%%edx\n"
26297+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26298+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26299 " movnti %%eax, 8(%3)\n"
26300 " movnti %%edx, 12(%3)\n"
26301- "4: movl 16(%4), %%eax\n"
26302- "41: movl 20(%4), %%edx\n"
26303+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26304+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26305 " movnti %%eax, 16(%3)\n"
26306 " movnti %%edx, 20(%3)\n"
26307- "10: movl 24(%4), %%eax\n"
26308- "51: movl 28(%4), %%edx\n"
26309+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26310+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26311 " movnti %%eax, 24(%3)\n"
26312 " movnti %%edx, 28(%3)\n"
26313- "11: movl 32(%4), %%eax\n"
26314- "61: movl 36(%4), %%edx\n"
26315+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26316+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26317 " movnti %%eax, 32(%3)\n"
26318 " movnti %%edx, 36(%3)\n"
26319- "12: movl 40(%4), %%eax\n"
26320- "71: movl 44(%4), %%edx\n"
26321+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26322+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26323 " movnti %%eax, 40(%3)\n"
26324 " movnti %%edx, 44(%3)\n"
26325- "13: movl 48(%4), %%eax\n"
26326- "81: movl 52(%4), %%edx\n"
26327+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26328+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26329 " movnti %%eax, 48(%3)\n"
26330 " movnti %%edx, 52(%3)\n"
26331- "14: movl 56(%4), %%eax\n"
26332- "91: movl 60(%4), %%edx\n"
26333+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26334+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26335 " movnti %%eax, 56(%3)\n"
26336 " movnti %%edx, 60(%3)\n"
26337 " addl $-64, %0\n"
26338@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
26339 " shrl $2, %0\n"
26340 " andl $3, %%eax\n"
26341 " cld\n"
26342- "6: rep; movsl\n"
26343+ "6: rep; "__copyuser_seg" movsl\n"
26344 " movl %%eax,%0\n"
26345- "7: rep; movsb\n"
26346+ "7: rep; "__copyuser_seg" movsb\n"
26347 "8:\n"
26348 ".section .fixup,\"ax\"\n"
26349 "9: lea 0(%%eax,%0,4),%0\n"
26350@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
26351 */
26352 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
26353 unsigned long size);
26354-unsigned long __copy_user_intel(void __user *to, const void *from,
26355+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
26356+ unsigned long size);
26357+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
26358 unsigned long size);
26359 unsigned long __copy_user_zeroing_intel_nocache(void *to,
26360 const void __user *from, unsigned long size);
26361 #endif /* CONFIG_X86_INTEL_USERCOPY */
26362
26363 /* Generic arbitrary sized copy. */
26364-#define __copy_user(to, from, size) \
26365+#define __copy_user(to, from, size, prefix, set, restore) \
26366 do { \
26367 int __d0, __d1, __d2; \
26368 __asm__ __volatile__( \
26369+ set \
26370 " cmp $7,%0\n" \
26371 " jbe 1f\n" \
26372 " movl %1,%0\n" \
26373 " negl %0\n" \
26374 " andl $7,%0\n" \
26375 " subl %0,%3\n" \
26376- "4: rep; movsb\n" \
26377+ "4: rep; "prefix"movsb\n" \
26378 " movl %3,%0\n" \
26379 " shrl $2,%0\n" \
26380 " andl $3,%3\n" \
26381 " .align 2,0x90\n" \
26382- "0: rep; movsl\n" \
26383+ "0: rep; "prefix"movsl\n" \
26384 " movl %3,%0\n" \
26385- "1: rep; movsb\n" \
26386+ "1: rep; "prefix"movsb\n" \
26387 "2:\n" \
26388+ restore \
26389 ".section .fixup,\"ax\"\n" \
26390 "5: addl %3,%0\n" \
26391 " jmp 2b\n" \
26392@@ -538,14 +650,14 @@ do { \
26393 " negl %0\n" \
26394 " andl $7,%0\n" \
26395 " subl %0,%3\n" \
26396- "4: rep; movsb\n" \
26397+ "4: rep; "__copyuser_seg"movsb\n" \
26398 " movl %3,%0\n" \
26399 " shrl $2,%0\n" \
26400 " andl $3,%3\n" \
26401 " .align 2,0x90\n" \
26402- "0: rep; movsl\n" \
26403+ "0: rep; "__copyuser_seg"movsl\n" \
26404 " movl %3,%0\n" \
26405- "1: rep; movsb\n" \
26406+ "1: rep; "__copyuser_seg"movsb\n" \
26407 "2:\n" \
26408 ".section .fixup,\"ax\"\n" \
26409 "5: addl %3,%0\n" \
26410@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
26411 {
26412 stac();
26413 if (movsl_is_ok(to, from, n))
26414- __copy_user(to, from, n);
26415+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
26416 else
26417- n = __copy_user_intel(to, from, n);
26418+ n = __generic_copy_to_user_intel(to, from, n);
26419 clac();
26420 return n;
26421 }
26422@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
26423 {
26424 stac();
26425 if (movsl_is_ok(to, from, n))
26426- __copy_user(to, from, n);
26427+ __copy_user(to, from, n, __copyuser_seg, "", "");
26428 else
26429- n = __copy_user_intel((void __user *)to,
26430- (const void *)from, n);
26431+ n = __generic_copy_from_user_intel(to, from, n);
26432 clac();
26433 return n;
26434 }
26435@@ -632,66 +743,51 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
26436 if (n > 64 && cpu_has_xmm2)
26437 n = __copy_user_intel_nocache(to, from, n);
26438 else
26439- __copy_user(to, from, n);
26440+ __copy_user(to, from, n, __copyuser_seg, "", "");
26441 #else
26442- __copy_user(to, from, n);
26443+ __copy_user(to, from, n, __copyuser_seg, "", "");
26444 #endif
26445 clac();
26446 return n;
26447 }
26448 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
26449
26450-/**
26451- * copy_to_user: - Copy a block of data into user space.
26452- * @to: Destination address, in user space.
26453- * @from: Source address, in kernel space.
26454- * @n: Number of bytes to copy.
26455- *
26456- * Context: User context only. This function may sleep.
26457- *
26458- * Copy data from kernel space to user space.
26459- *
26460- * Returns number of bytes that could not be copied.
26461- * On success, this will be zero.
26462- */
26463-unsigned long
26464-copy_to_user(void __user *to, const void *from, unsigned long n)
26465-{
26466- if (access_ok(VERIFY_WRITE, to, n))
26467- n = __copy_to_user(to, from, n);
26468- return n;
26469-}
26470-EXPORT_SYMBOL(copy_to_user);
26471-
26472-/**
26473- * copy_from_user: - Copy a block of data from user space.
26474- * @to: Destination address, in kernel space.
26475- * @from: Source address, in user space.
26476- * @n: Number of bytes to copy.
26477- *
26478- * Context: User context only. This function may sleep.
26479- *
26480- * Copy data from user space to kernel space.
26481- *
26482- * Returns number of bytes that could not be copied.
26483- * On success, this will be zero.
26484- *
26485- * If some data could not be copied, this function will pad the copied
26486- * data to the requested size using zero bytes.
26487- */
26488-unsigned long
26489-_copy_from_user(void *to, const void __user *from, unsigned long n)
26490-{
26491- if (access_ok(VERIFY_READ, from, n))
26492- n = __copy_from_user(to, from, n);
26493- else
26494- memset(to, 0, n);
26495- return n;
26496-}
26497-EXPORT_SYMBOL(_copy_from_user);
26498-
26499 void copy_from_user_overflow(void)
26500 {
26501 WARN(1, "Buffer overflow detected!\n");
26502 }
26503 EXPORT_SYMBOL(copy_from_user_overflow);
26504+
26505+void copy_to_user_overflow(void)
26506+{
26507+ WARN(1, "Buffer overflow detected!\n");
26508+}
26509+EXPORT_SYMBOL(copy_to_user_overflow);
26510+
26511+#ifdef CONFIG_PAX_MEMORY_UDEREF
26512+void __set_fs(mm_segment_t x)
26513+{
26514+ switch (x.seg) {
26515+ case 0:
26516+ loadsegment(gs, 0);
26517+ break;
26518+ case TASK_SIZE_MAX:
26519+ loadsegment(gs, __USER_DS);
26520+ break;
26521+ case -1UL:
26522+ loadsegment(gs, __KERNEL_DS);
26523+ break;
26524+ default:
26525+ BUG();
26526+ }
26527+ return;
26528+}
26529+EXPORT_SYMBOL(__set_fs);
26530+
26531+void set_fs(mm_segment_t x)
26532+{
26533+ current_thread_info()->addr_limit = x;
26534+ __set_fs(x);
26535+}
26536+EXPORT_SYMBOL(set_fs);
26537+#endif
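
[Note on the usercopy_32.c hunks above: under i386 UDEREF, __copyuser_seg splices a gs segment override into every user access, and the set_fs()/__set_fs() added above keep %gs in step with addr_limit (a null selector for a zero limit, so any access faults; __USER_DS for TASK_SIZE_MAX; __KERNEL_DS for KERNEL_DS). __COPYUSER_SET_ES/__COPYUSER_RESTORE_ES presumably do the analogous reload of %es, the implicit destination segment of rep stos/movs. The working assumption is that the user segment's limit covers only userland, so a stray kernel pointer faults in hardware. A sketch of a single segment-relative load, with the extable fixup omitted for brevity:]

	static inline unsigned char uderef_load_byte_sketch(const unsigned char __user *src)
	{
		unsigned char c;

		/* the "gs" override is what __copyuser_seg expands to above */
		asm volatile("movb %%gs:(%1), %0"
			     : "=q" (c)
			     : "r" (src));
		return c;
	}
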
26538diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
26539index 05928aa..b33dea1 100644
26540--- a/arch/x86/lib/usercopy_64.c
26541+++ b/arch/x86/lib/usercopy_64.c
26542@@ -39,7 +39,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
26543 _ASM_EXTABLE(0b,3b)
26544 _ASM_EXTABLE(1b,2b)
26545 : [size8] "=&c"(size), [dst] "=&D" (__d0)
26546- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
26547+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
26548 [zero] "r" (0UL), [eight] "r" (8UL));
26549 clac();
26550 return size;
26551@@ -54,12 +54,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
26552 }
26553 EXPORT_SYMBOL(clear_user);
26554
26555-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
26556+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
26557 {
26558- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
26559- return copy_user_generic((__force void *)to, (__force void *)from, len);
26560- }
26561- return len;
26562+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
26563+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
26564+ return len;
26565 }
26566 EXPORT_SYMBOL(copy_in_user);
26567
26568@@ -69,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
26569 * it is not necessary to optimize tail handling.
26570 */
26571 unsigned long
26572-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
26573+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
26574 {
26575 char c;
26576 unsigned zero_len;
26577@@ -87,3 +86,15 @@ copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
26578 clac();
26579 return len;
26580 }
26581+
26582+void copy_from_user_overflow(void)
26583+{
26584+ WARN(1, "Buffer overflow detected!\n");
26585+}
26586+EXPORT_SYMBOL(copy_from_user_overflow);
26587+
26588+void copy_to_user_overflow(void)
26589+{
26590+ WARN(1, "Buffer overflow detected!\n");
26591+}
26592+EXPORT_SYMBOL(copy_to_user_overflow);
26593diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
26594index 903ec1e..c4166b2 100644
26595--- a/arch/x86/mm/extable.c
26596+++ b/arch/x86/mm/extable.c
26597@@ -6,12 +6,24 @@
26598 static inline unsigned long
26599 ex_insn_addr(const struct exception_table_entry *x)
26600 {
26601- return (unsigned long)&x->insn + x->insn;
26602+ unsigned long reloc = 0;
26603+
26604+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26605+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
26606+#endif
26607+
26608+ return (unsigned long)&x->insn + x->insn + reloc;
26609 }
26610 static inline unsigned long
26611 ex_fixup_addr(const struct exception_table_entry *x)
26612 {
26613- return (unsigned long)&x->fixup + x->fixup;
26614+ unsigned long reloc = 0;
26615+
26616+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26617+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
26618+#endif
26619+
26620+ return (unsigned long)&x->fixup + x->fixup + reloc;
26621 }
26622
26623 int fixup_exception(struct pt_regs *regs)
26624@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
26625 unsigned long new_ip;
26626
26627 #ifdef CONFIG_PNPBIOS
26628- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
26629+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
26630 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
26631 extern u32 pnp_bios_is_utter_crap;
26632 pnp_bios_is_utter_crap = 1;
26633@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
26634 i += 4;
26635 p->fixup -= i;
26636 i += 4;
26637+
26638+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26639+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
26640+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
26641+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
26642+#endif
26643+
26644 }
26645 }
26646
26647diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
26648index fb674fd..272f369 100644
26649--- a/arch/x86/mm/fault.c
26650+++ b/arch/x86/mm/fault.c
26651@@ -13,12 +13,19 @@
26652 #include <linux/perf_event.h> /* perf_sw_event */
26653 #include <linux/hugetlb.h> /* hstate_index_to_shift */
26654 #include <linux/prefetch.h> /* prefetchw */
26655+#include <linux/unistd.h>
26656+#include <linux/compiler.h>
26657
26658 #include <asm/traps.h> /* dotraplinkage, ... */
26659 #include <asm/pgalloc.h> /* pgd_*(), ... */
26660 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
26661 #include <asm/fixmap.h> /* VSYSCALL_START */
26662 #include <asm/context_tracking.h> /* exception_enter(), ... */
26663+#include <asm/tlbflush.h>
26664+
26665+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26666+#include <asm/stacktrace.h>
26667+#endif
26668
26669 /*
26670 * Page fault error code bits:
26671@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
26672 int ret = 0;
26673
26674 /* kprobe_running() needs smp_processor_id() */
26675- if (kprobes_built_in() && !user_mode_vm(regs)) {
26676+ if (kprobes_built_in() && !user_mode(regs)) {
26677 preempt_disable();
26678 if (kprobe_running() && kprobe_fault_handler(regs, 14))
26679 ret = 1;
26680@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
26681 return !instr_lo || (instr_lo>>1) == 1;
26682 case 0x00:
26683 /* Prefetch instruction is 0x0F0D or 0x0F18 */
26684- if (probe_kernel_address(instr, opcode))
26685+ if (user_mode(regs)) {
26686+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
26687+ return 0;
26688+ } else if (probe_kernel_address(instr, opcode))
26689 return 0;
26690
26691 *prefetch = (instr_lo == 0xF) &&
26692@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
26693 while (instr < max_instr) {
26694 unsigned char opcode;
26695
26696- if (probe_kernel_address(instr, opcode))
26697+ if (user_mode(regs)) {
26698+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
26699+ break;
26700+ } else if (probe_kernel_address(instr, opcode))
26701 break;
26702
26703 instr++;
26704@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
26705 force_sig_info(si_signo, &info, tsk);
26706 }
26707
26708+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26709+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
26710+#endif
26711+
26712+#ifdef CONFIG_PAX_EMUTRAMP
26713+static int pax_handle_fetch_fault(struct pt_regs *regs);
26714+#endif
26715+
26716+#ifdef CONFIG_PAX_PAGEEXEC
26717+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
26718+{
26719+ pgd_t *pgd;
26720+ pud_t *pud;
26721+ pmd_t *pmd;
26722+
26723+ pgd = pgd_offset(mm, address);
26724+ if (!pgd_present(*pgd))
26725+ return NULL;
26726+ pud = pud_offset(pgd, address);
26727+ if (!pud_present(*pud))
26728+ return NULL;
26729+ pmd = pmd_offset(pud, address);
26730+ if (!pmd_present(*pmd))
26731+ return NULL;
26732+ return pmd;
26733+}
26734+#endif
26735+
26736 DEFINE_SPINLOCK(pgd_lock);
26737 LIST_HEAD(pgd_list);
26738
26739@@ -232,10 +273,22 @@ void vmalloc_sync_all(void)
26740 for (address = VMALLOC_START & PMD_MASK;
26741 address >= TASK_SIZE && address < FIXADDR_TOP;
26742 address += PMD_SIZE) {
26743+
26744+#ifdef CONFIG_PAX_PER_CPU_PGD
26745+ unsigned long cpu;
26746+#else
26747 struct page *page;
26748+#endif
26749
26750 spin_lock(&pgd_lock);
26751+
26752+#ifdef CONFIG_PAX_PER_CPU_PGD
26753+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
26754+ pgd_t *pgd = get_cpu_pgd(cpu);
26755+ pmd_t *ret;
26756+#else
26757 list_for_each_entry(page, &pgd_list, lru) {
26758+ pgd_t *pgd = page_address(page);
26759 spinlock_t *pgt_lock;
26760 pmd_t *ret;
26761
26762@@ -243,8 +296,13 @@ void vmalloc_sync_all(void)
26763 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
26764
26765 spin_lock(pgt_lock);
26766- ret = vmalloc_sync_one(page_address(page), address);
26767+#endif
26768+
26769+ ret = vmalloc_sync_one(pgd, address);
26770+
26771+#ifndef CONFIG_PAX_PER_CPU_PGD
26772 spin_unlock(pgt_lock);
26773+#endif
26774
26775 if (!ret)
26776 break;
26777@@ -278,6 +336,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
26778 * an interrupt in the middle of a task switch..
26779 */
26780 pgd_paddr = read_cr3();
26781+
26782+#ifdef CONFIG_PAX_PER_CPU_PGD
26783+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
26784+#endif
26785+
26786 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
26787 if (!pmd_k)
26788 return -1;
26789@@ -373,7 +436,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
26790 * happen within a race in page table update. In the later
26791 * case just flush:
26792 */
26793+
26794+#ifdef CONFIG_PAX_PER_CPU_PGD
26795+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
26796+ pgd = pgd_offset_cpu(smp_processor_id(), address);
26797+#else
26798 pgd = pgd_offset(current->active_mm, address);
26799+#endif
26800+
26801 pgd_ref = pgd_offset_k(address);
26802 if (pgd_none(*pgd_ref))
26803 return -1;
26804@@ -541,7 +611,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
26805 static int is_errata100(struct pt_regs *regs, unsigned long address)
26806 {
26807 #ifdef CONFIG_X86_64
26808- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
26809+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
26810 return 1;
26811 #endif
26812 return 0;
26813@@ -568,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
26814 }
26815
26816 static const char nx_warning[] = KERN_CRIT
26817-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
26818+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
26819
26820 static void
26821 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
26822@@ -577,15 +647,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
26823 if (!oops_may_print())
26824 return;
26825
26826- if (error_code & PF_INSTR) {
26827+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
26828 unsigned int level;
26829
26830 pte_t *pte = lookup_address(address, &level);
26831
26832 if (pte && pte_present(*pte) && !pte_exec(*pte))
26833- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
26834+ printk(nx_warning, from_kuid(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
26835 }
26836
26837+#ifdef CONFIG_PAX_KERNEXEC
26838+ if (init_mm.start_code <= address && address < init_mm.end_code) {
26839+ if (current->signal->curr_ip)
26840+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
26841+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
26842+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
26843+ else
26844+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
26845+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
26846+ }
26847+#endif
26848+
26849 printk(KERN_ALERT "BUG: unable to handle kernel ");
26850 if (address < PAGE_SIZE)
26851 printk(KERN_CONT "NULL pointer dereference");
26852@@ -748,6 +830,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
26853 return;
26854 }
26855 #endif
26856+
26857+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
26858+ if (pax_is_fetch_fault(regs, error_code, address)) {
26859+
26860+#ifdef CONFIG_PAX_EMUTRAMP
26861+ switch (pax_handle_fetch_fault(regs)) {
26862+ case 2:
26863+ return;
26864+ }
26865+#endif
26866+
26867+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
26868+ do_group_exit(SIGKILL);
26869+ }
26870+#endif
26871+
26872 /* Kernel addresses are always protection faults: */
26873 if (address >= TASK_SIZE)
26874 error_code |= PF_PROT;
26875@@ -833,7 +931,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
26876 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
26877 printk(KERN_ERR
26878 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
26879- tsk->comm, tsk->pid, address);
26880+ tsk->comm, task_pid_nr(tsk), address);
26881 code = BUS_MCEERR_AR;
26882 }
26883 #endif
26884@@ -896,6 +994,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
26885 return 1;
26886 }
26887
26888+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
26889+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
26890+{
26891+ pte_t *pte;
26892+ pmd_t *pmd;
26893+ spinlock_t *ptl;
26894+ unsigned char pte_mask;
26895+
26896+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
26897+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
26898+ return 0;
26899+
26900+ /* PaX: it's our fault, let's handle it if we can */
26901+
26902+ /* PaX: take a look at read faults before acquiring any locks */
26903+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
26904+ /* instruction fetch attempt from a protected page in user mode */
26905+ up_read(&mm->mmap_sem);
26906+
26907+#ifdef CONFIG_PAX_EMUTRAMP
26908+ switch (pax_handle_fetch_fault(regs)) {
26909+ case 2:
26910+ return 1;
26911+ }
26912+#endif
26913+
26914+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
26915+ do_group_exit(SIGKILL);
26916+ }
26917+
26918+ pmd = pax_get_pmd(mm, address);
26919+ if (unlikely(!pmd))
26920+ return 0;
26921+
26922+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
26923+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
26924+ pte_unmap_unlock(pte, ptl);
26925+ return 0;
26926+ }
26927+
26928+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
26929+ /* write attempt to a protected page in user mode */
26930+ pte_unmap_unlock(pte, ptl);
26931+ return 0;
26932+ }
26933+
26934+#ifdef CONFIG_SMP
26935+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
26936+#else
26937+ if (likely(address > get_limit(regs->cs)))
26938+#endif
26939+ {
26940+ set_pte(pte, pte_mkread(*pte));
26941+ __flush_tlb_one(address);
26942+ pte_unmap_unlock(pte, ptl);
26943+ up_read(&mm->mmap_sem);
26944+ return 1;
26945+ }
26946+
26947+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
26948+
26949+ /*
26950+ * PaX: fill DTLB with user rights and retry
26951+ */
26952+ __asm__ __volatile__ (
26953+ "orb %2,(%1)\n"
26954+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
26955+/*
26956+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
26957+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
26958+ * page fault when examined during a TLB load attempt. this is true not only
26959+ * for PTEs holding a non-present entry but also present entries that will
26960+ * raise a page fault (such as those set up by PaX, or the copy-on-write
26961+ * mechanism). in effect it means that we do *not* need to flush the TLBs
26962+ * for our target pages since their PTEs are simply not in the TLBs at all.
26963+
26964+ * the best thing in omitting it is that we gain around 15-20% speed in the
26965+ * fast path of the page fault handler and can get rid of tracing since we
26966+ * can no longer flush unintended entries.
26967+ */
26968+ "invlpg (%0)\n"
26969+#endif
26970+ __copyuser_seg"testb $0,(%0)\n"
26971+ "xorb %3,(%1)\n"
26972+ :
26973+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
26974+ : "memory", "cc");
26975+ pte_unmap_unlock(pte, ptl);
26976+ up_read(&mm->mmap_sem);
26977+ return 1;
26978+}
26979+#endif
26980+
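
[Note on pax_handle_pageexec_fault() above: it implements PAGEEXEC on CPUs without NX. Pages meant to be non-executable are kept supervisor-only, so a user data access faults; the handler then briefly grants user rights on the PTE, touches the page through the user segment so the data TLB caches the permissive translation, and revokes the rights again. The retried data access is served from the cached DTLB entry, while an instruction fetch (detected as ip == address) still faults via the ITLB and is reported or emulated. The asm's three steps replayed in plain C, illustrative only, since the real load must go through the user segment, which C cannot express:]

	static void dtlb_fill_replay_sketch(unsigned char *pte_low,
					    const volatile char *addr,
					    unsigned char pte_mask)
	{
		*pte_low |= pte_mask;	/* orb: set user/accessed (and dirty for writes) */
		(void)*addr;		/* testb $0,(addr): pull the translation into the DTLB */
		*pte_low ^= 0x04;	/* xorb $_PAGE_USER: revoke user rights, keep accessed/dirty */
	}
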
26981 /*
26982 * Handle a spurious fault caused by a stale TLB entry.
26983 *
26984@@ -968,6 +1159,9 @@ int show_unhandled_signals = 1;
26985 static inline int
26986 access_error(unsigned long error_code, struct vm_area_struct *vma)
26987 {
26988+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
26989+ return 1;
26990+
26991 if (error_code & PF_WRITE) {
26992 /* write, present and write, not present: */
26993 if (unlikely(!(vma->vm_flags & VM_WRITE)))
26994@@ -996,7 +1190,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
26995 if (error_code & PF_USER)
26996 return false;
26997
26998- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
26999+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
27000 return false;
27001
27002 return true;
27003@@ -1012,18 +1206,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27004 {
27005 struct vm_area_struct *vma;
27006 struct task_struct *tsk;
27007- unsigned long address;
27008 struct mm_struct *mm;
27009 int fault;
27010 int write = error_code & PF_WRITE;
27011 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
27012 (write ? FAULT_FLAG_WRITE : 0);
27013
27014- tsk = current;
27015- mm = tsk->mm;
27016-
27017 /* Get the faulting address: */
27018- address = read_cr2();
27019+ unsigned long address = read_cr2();
27020+
27021+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27022+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
27023+ if (!search_exception_tables(regs->ip)) {
27024+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27025+ bad_area_nosemaphore(regs, error_code, address);
27026+ return;
27027+ }
27028+ if (address < PAX_USER_SHADOW_BASE) {
27029+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27030+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
27031+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
27032+ } else
27033+ address -= PAX_USER_SHADOW_BASE;
27034+ }
27035+#endif
27036+
27037+ tsk = current;
27038+ mm = tsk->mm;
27039
27040 /*
27041 * Detect and handle instructions that would cause a page fault for
27042@@ -1084,7 +1293,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27043 * User-mode registers count as a user access even for any
27044 * potential system fault or CPU buglet:
27045 */
27046- if (user_mode_vm(regs)) {
27047+ if (user_mode(regs)) {
27048 local_irq_enable();
27049 error_code |= PF_USER;
27050 } else {
27051@@ -1146,6 +1355,11 @@ retry:
27052 might_sleep();
27053 }
27054
27055+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27056+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
27057+ return;
27058+#endif
27059+
27060 vma = find_vma(mm, address);
27061 if (unlikely(!vma)) {
27062 bad_area(regs, error_code, address);
27063@@ -1157,18 +1371,24 @@ retry:
27064 bad_area(regs, error_code, address);
27065 return;
27066 }
27067- if (error_code & PF_USER) {
27068- /*
27069- * Accessing the stack below %sp is always a bug.
27070- * The large cushion allows instructions like enter
27071- * and pusha to work. ("enter $65535, $31" pushes
27072- * 32 pointers and then decrements %sp by 65535.)
27073- */
27074- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
27075- bad_area(regs, error_code, address);
27076- return;
27077- }
27078+ /*
27079+ * Accessing the stack below %sp is always a bug.
27080+ * The large cushion allows instructions like enter
27081+ * and pusha to work. ("enter $65535, $31" pushes
27082+ * 32 pointers and then decrements %sp by 65535.)
27083+ */
27084+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
27085+ bad_area(regs, error_code, address);
27086+ return;
27087 }
27088+
27089+#ifdef CONFIG_PAX_SEGMEXEC
27090+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
27091+ bad_area(regs, error_code, address);
27092+ return;
27093+ }
27094+#endif
27095+
27096 if (unlikely(expand_stack(vma, address))) {
27097 bad_area(regs, error_code, address);
27098 return;
27099@@ -1232,3 +1452,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
27100 __do_page_fault(regs, error_code);
27101 exception_exit(regs);
27102 }
27103+
27104+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27105+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
27106+{
27107+ struct mm_struct *mm = current->mm;
27108+ unsigned long ip = regs->ip;
27109+
27110+ if (v8086_mode(regs))
27111+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
27112+
27113+#ifdef CONFIG_PAX_PAGEEXEC
27114+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
27115+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
27116+ return true;
27117+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
27118+ return true;
27119+ return false;
27120+ }
27121+#endif
27122+
27123+#ifdef CONFIG_PAX_SEGMEXEC
27124+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
27125+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
27126+ return true;
27127+ return false;
27128+ }
27129+#endif
27130+
27131+ return false;
27132+}
27133+#endif
27134+
27135+#ifdef CONFIG_PAX_EMUTRAMP
27136+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
27137+{
27138+ int err;
27139+
27140+ do { /* PaX: libffi trampoline emulation */
27141+ unsigned char mov, jmp;
27142+ unsigned int addr1, addr2;
27143+
27144+#ifdef CONFIG_X86_64
27145+ if ((regs->ip + 9) >> 32)
27146+ break;
27147+#endif
27148+
27149+ err = get_user(mov, (unsigned char __user *)regs->ip);
27150+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27151+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
27152+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27153+
27154+ if (err)
27155+ break;
27156+
27157+ if (mov == 0xB8 && jmp == 0xE9) {
27158+ regs->ax = addr1;
27159+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
27160+ return 2;
27161+ }
27162+ } while (0);
27163+
27164+ do { /* PaX: gcc trampoline emulation #1 */
27165+ unsigned char mov1, mov2;
27166+ unsigned short jmp;
27167+ unsigned int addr1, addr2;
27168+
27169+#ifdef CONFIG_X86_64
27170+ if ((regs->ip + 11) >> 32)
27171+ break;
27172+#endif
27173+
27174+ err = get_user(mov1, (unsigned char __user *)regs->ip);
27175+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27176+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
27177+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27178+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
27179+
27180+ if (err)
27181+ break;
27182+
27183+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
27184+ regs->cx = addr1;
27185+ regs->ax = addr2;
27186+ regs->ip = addr2;
27187+ return 2;
27188+ }
27189+ } while (0);
27190+
27191+ do { /* PaX: gcc trampoline emulation #2 */
27192+ unsigned char mov, jmp;
27193+ unsigned int addr1, addr2;
27194+
27195+#ifdef CONFIG_X86_64
27196+ if ((regs->ip + 9) >> 32)
27197+ break;
27198+#endif
27199+
27200+ err = get_user(mov, (unsigned char __user *)regs->ip);
27201+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27202+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
27203+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27204+
27205+ if (err)
27206+ break;
27207+
27208+ if (mov == 0xB9 && jmp == 0xE9) {
27209+ regs->cx = addr1;
27210+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
27211+ return 2;
27212+ }
27213+ } while (0);
27214+
27215+ return 1; /* PaX in action */
27216+}
27217+
27218+#ifdef CONFIG_X86_64
27219+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
27220+{
27221+ int err;
27222+
27223+ do { /* PaX: libffi trampoline emulation */
27224+ unsigned short mov1, mov2, jmp1;
27225+ unsigned char stcclc, jmp2;
27226+ unsigned long addr1, addr2;
27227+
27228+ err = get_user(mov1, (unsigned short __user *)regs->ip);
27229+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
27230+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
27231+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
27232+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
27233+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
27234+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
27235+
27236+ if (err)
27237+ break;
27238+
27239+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27240+ regs->r11 = addr1;
27241+ regs->r10 = addr2;
27242+ if (stcclc == 0xF8)
27243+ regs->flags &= ~X86_EFLAGS_CF;
27244+ else
27245+ regs->flags |= X86_EFLAGS_CF;
27246+ regs->ip = addr1;
27247+ return 2;
27248+ }
27249+ } while (0);
27250+
27251+ do { /* PaX: gcc trampoline emulation #1 */
27252+ unsigned short mov1, mov2, jmp1;
27253+ unsigned char jmp2;
27254+ unsigned int addr1;
27255+ unsigned long addr2;
27256+
27257+ err = get_user(mov1, (unsigned short __user *)regs->ip);
27258+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
27259+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
27260+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
27261+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
27262+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
27263+
27264+ if (err)
27265+ break;
27266+
27267+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27268+ regs->r11 = addr1;
27269+ regs->r10 = addr2;
27270+ regs->ip = addr1;
27271+ return 2;
27272+ }
27273+ } while (0);
27274+
27275+ do { /* PaX: gcc trampoline emulation #2 */
27276+ unsigned short mov1, mov2, jmp1;
27277+ unsigned char jmp2;
27278+ unsigned long addr1, addr2;
27279+
27280+ err = get_user(mov1, (unsigned short __user *)regs->ip);
27281+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
27282+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
27283+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
27284+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
27285+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
27286+
27287+ if (err)
27288+ break;
27289+
27290+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27291+ regs->r11 = addr1;
27292+ regs->r10 = addr2;
27293+ regs->ip = addr1;
27294+ return 2;
27295+ }
27296+ } while (0);
27297+
27298+ return 1; /* PaX in action */
27299+}
27300+#endif
27301+
27302+/*
27303+ * PaX: decide what to do with offenders (regs->ip = fault address)
27304+ *
27305+ * returns 1 when task should be killed
27306+ * 2 when gcc trampoline was detected
27307+ */
27308+static int pax_handle_fetch_fault(struct pt_regs *regs)
27309+{
27310+ if (v8086_mode(regs))
27311+ return 1;
27312+
27313+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
27314+ return 1;
27315+
27316+#ifdef CONFIG_X86_32
27317+ return pax_handle_fetch_fault_32(regs);
27318+#else
27319+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
27320+ return pax_handle_fetch_fault_32(regs);
27321+ else
27322+ return pax_handle_fetch_fault_64(regs);
27323+#endif
27324+}
27325+#endif
27326+
27327+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27328+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
27329+{
27330+ long i;
27331+
27332+ printk(KERN_ERR "PAX: bytes at PC: ");
27333+ for (i = 0; i < 20; i++) {
27334+ unsigned char c;
27335+ if (get_user(c, (unsigned char __force_user *)pc+i))
27336+ printk(KERN_CONT "?? ");
27337+ else
27338+ printk(KERN_CONT "%02x ", c);
27339+ }
27340+ printk("\n");
27341+
27342+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
27343+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
27344+ unsigned long c;
27345+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
27346+#ifdef CONFIG_X86_32
27347+ printk(KERN_CONT "???????? ");
27348+#else
27349+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
27350+ printk(KERN_CONT "???????? ???????? ");
27351+ else
27352+ printk(KERN_CONT "???????????????? ");
27353+#endif
27354+ } else {
27355+#ifdef CONFIG_X86_64
27356+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
27357+ printk(KERN_CONT "%08x ", (unsigned int)c);
27358+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
27359+ } else
27360+#endif
27361+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
27362+ }
27363+ }
27364+ printk("\n");
27365+}
27366+#endif
27367+
27368+/**
27369+ * probe_kernel_write(): safely attempt to write to a location
27370+ * @dst: address to write to
27371+ * @src: pointer to the data that shall be written
27372+ * @size: size of the data chunk
27373+ *
27374+ * Safely write to address @dst from the buffer at @src. If a kernel fault
27375+ * happens, handle that and return -EFAULT.
27376+ */
27377+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
27378+{
27379+ long ret;
27380+ mm_segment_t old_fs = get_fs();
27381+
27382+ set_fs(KERNEL_DS);
27383+ pagefault_disable();
27384+ pax_open_kernel();
27385+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
27386+ pax_close_kernel();
27387+ pagefault_enable();
27388+ set_fs(old_fs);
27389+
27390+ return ret ? -EFAULT : 0;
27391+}
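The 32-bit EMUTRAMP handlers above never make the faulting page executable; they recognize the handful of byte sequences gcc and libffi emit for runtime trampolines and perform the equivalent register updates directly. For the mov $imm32,%ecx / jmp rel32 pair the new ip is ip + disp + 10 because the displacement is relative to the end of the 10-byte sequence. A minimal user-space sketch of that match and fix-up (struct and function names are invented for the illustration; little-endian x86 assumed):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_regs { uint32_t ip, cx; };

/* returns 1 if the 10 bytes look like the gcc nested-function
 * trampoline of case #2 and regs was updated the way the fault
 * handler above would update it */
static int emulate_tramp2(const uint8_t insn[10], struct fake_regs *regs)
{
	uint32_t addr1, addr2;

	if (insn[0] != 0xB9 || insn[5] != 0xE9)	/* mov $imm32,%ecx ; jmp rel32 */
		return 0;
	memcpy(&addr1, insn + 1, 4);		/* static chain value */
	memcpy(&addr2, insn + 6, 4);		/* branch displacement */
	regs->cx = addr1;
	regs->ip = regs->ip + addr2 + 10;	/* rel32 counts from the next insn */
	return 1;
}

int main(void)
{
	/* mov $0xdeadbeef,%ecx ; jmp .+0x1000 (displacement 0xff6) */
	uint8_t tramp[10] = { 0xB9, 0xef, 0xbe, 0xad, 0xde,
			      0xE9, 0xf6, 0x0f, 0x00, 0x00 };
	struct fake_regs regs = { .ip = 0x08048000, .cx = 0 };

	if (emulate_tramp2(tramp, &regs))
		printf("cx=%#x new ip=%#x\n", regs.cx, regs.ip);
	return 0;
}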
27392diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
27393index dd74e46..7d26398 100644
27394--- a/arch/x86/mm/gup.c
27395+++ b/arch/x86/mm/gup.c
27396@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
27397 addr = start;
27398 len = (unsigned long) nr_pages << PAGE_SHIFT;
27399 end = start + len;
27400- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
27401+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
27402 (void __user *)start, len)))
27403 return 0;
27404
27405diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
27406index 6f31ee5..8ee4164 100644
27407--- a/arch/x86/mm/highmem_32.c
27408+++ b/arch/x86/mm/highmem_32.c
27409@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
27410 idx = type + KM_TYPE_NR*smp_processor_id();
27411 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
27412 BUG_ON(!pte_none(*(kmap_pte-idx)));
27413+
27414+ pax_open_kernel();
27415 set_pte(kmap_pte-idx, mk_pte(page, prot));
27416+ pax_close_kernel();
27417+
27418 arch_flush_lazy_mmu_mode();
27419
27420 return (void *)vaddr;
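kmap_atomic_prot() now writes the fixmap pte between pax_open_kernel() and pax_close_kernel(), because KERNEXEC keeps kernel page tables read-only. The native implementation of that pair (added elsewhere in this patch) essentially toggles the CR0.WP bit with preemption disabled; a simplified sketch of the idea, not the exact patch code:

#include <linux/preempt.h>
#include <asm/processor-flags.h>
#include <asm/special_insns.h>

static inline unsigned long native_pax_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	cr0 = read_cr0() ^ X86_CR0_WP;	/* clear WP: ring 0 may write RO pages */
	write_cr0(cr0);
	return cr0;
}

static inline unsigned long native_pax_close_kernel(void)
{
	unsigned long cr0;

	cr0 = read_cr0() ^ X86_CR0_WP;	/* restore WP */
	write_cr0(cr0);
	preempt_enable();
	return cr0;
}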
27421diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
27422index ae1aa71..56316db 100644
27423--- a/arch/x86/mm/hugetlbpage.c
27424+++ b/arch/x86/mm/hugetlbpage.c
27425@@ -279,6 +279,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
27426 info.flags = 0;
27427 info.length = len;
27428 info.low_limit = TASK_UNMAPPED_BASE;
27429+
27430+#ifdef CONFIG_PAX_RANDMMAP
27431+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
27432+ info.low_limit += current->mm->delta_mmap;
27433+#endif
27434+
27435 info.high_limit = TASK_SIZE;
27436 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
27437 info.align_offset = 0;
27438@@ -311,6 +317,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
27439 VM_BUG_ON(addr != -ENOMEM);
27440 info.flags = 0;
27441 info.low_limit = TASK_UNMAPPED_BASE;
27442+
27443+#ifdef CONFIG_PAX_RANDMMAP
27444+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
27445+ info.low_limit += current->mm->delta_mmap;
27446+#endif
27447+
27448 info.high_limit = TASK_SIZE;
27449 addr = vm_unmapped_area(&info);
27450 }
27451@@ -325,10 +337,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
27452 struct hstate *h = hstate_file(file);
27453 struct mm_struct *mm = current->mm;
27454 struct vm_area_struct *vma;
27455+ unsigned long pax_task_size = TASK_SIZE;
27456+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
27457
27458 if (len & ~huge_page_mask(h))
27459 return -EINVAL;
27460- if (len > TASK_SIZE)
27461+
27462+#ifdef CONFIG_PAX_SEGMEXEC
27463+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
27464+ pax_task_size = SEGMEXEC_TASK_SIZE;
27465+#endif
27466+
27467+ pax_task_size -= PAGE_SIZE;
27468+
27469+ if (len > pax_task_size)
27470 return -ENOMEM;
27471
27472 if (flags & MAP_FIXED) {
27473@@ -337,11 +359,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
27474 return addr;
27475 }
27476
27477+#ifdef CONFIG_PAX_RANDMMAP
27478+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
27479+#endif
27480+
27481 if (addr) {
27482 addr = ALIGN(addr, huge_page_size(h));
27483 vma = find_vma(mm, addr);
27484- if (TASK_SIZE - len >= addr &&
27485- (!vma || addr + len <= vma->vm_start))
27486+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
27487 return addr;
27488 }
27489 if (mm->get_unmapped_area == arch_get_unmapped_area)
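Two effects in the hugetlb hunks above: SEGMEXEC halves the usable address space (the code/data split), so the length check uses the reduced task size minus one guard page, and RANDMMAP shifts the bottom of the unmapped-area search window up by the per-mm delta_mmap. A standalone illustration with the usual 32-bit constants (the helper names are made up for this example):

#include <stdio.h>

#define PAGE_SIZE		0x1000UL
#define TASK_SIZE		0xC0000000UL		/* 32-bit, 3 GB */
#define SEGMEXEC_TASK_SIZE	(TASK_SIZE / 2)		/* code/data halves */
#define TASK_UNMAPPED_BASE	(TASK_SIZE / 3)

/* largest request the len check above still accepts */
static unsigned long max_hugetlb_len(int segmexec)
{
	unsigned long task_size = segmexec ? SEGMEXEC_TASK_SIZE : TASK_SIZE;

	return task_size - PAGE_SIZE;	/* one guard page is reserved */
}

/* bottom of the search window after the RANDMMAP shift */
static unsigned long low_limit(int randmmap, unsigned long delta_mmap)
{
	return TASK_UNMAPPED_BASE + (randmmap ? delta_mmap : 0);
}

int main(void)
{
	printf("max len:   %#lx\n", max_hugetlb_len(1));
	printf("low limit: %#lx\n", low_limit(1, 0x01000000UL)); /* 16 MB delta */
	return 0;
}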
27490diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
27491index d7aea41..0fc945b 100644
27492--- a/arch/x86/mm/init.c
27493+++ b/arch/x86/mm/init.c
27494@@ -4,6 +4,7 @@
27495 #include <linux/swap.h>
27496 #include <linux/memblock.h>
27497 #include <linux/bootmem.h> /* for max_low_pfn */
27498+#include <linux/tboot.h>
27499
27500 #include <asm/cacheflush.h>
27501 #include <asm/e820.h>
27502@@ -16,6 +17,8 @@
27503 #include <asm/tlb.h>
27504 #include <asm/proto.h>
27505 #include <asm/dma.h> /* for MAX_DMA_PFN */
27506+#include <asm/desc.h>
27507+#include <asm/bios_ebda.h>
27508
27509 unsigned long __initdata pgt_buf_start;
27510 unsigned long __meminitdata pgt_buf_end;
27511@@ -44,7 +47,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range)
27512 {
27513 int i;
27514 unsigned long puds = 0, pmds = 0, ptes = 0, tables;
27515- unsigned long start = 0, good_end;
27516+ unsigned long start = 0x100000, good_end;
27517 phys_addr_t base;
27518
27519 for (i = 0; i < nr_range; i++) {
27520@@ -321,10 +324,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
27521 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
27522 * mmio resources as well as potential bios/acpi data regions.
27523 */
27524+
27525+#ifdef CONFIG_GRKERNSEC_KMEM
27526+static unsigned int ebda_start __read_only;
27527+static unsigned int ebda_end __read_only;
27528+#endif
27529+
27530 int devmem_is_allowed(unsigned long pagenr)
27531 {
27532- if (pagenr < 256)
27533+#ifdef CONFIG_GRKERNSEC_KMEM
27534+ /* allow BDA */
27535+ if (!pagenr)
27536 return 1;
27537+ /* allow EBDA */
27538+ if (pagenr >= ebda_start && pagenr < ebda_end)
27539+ return 1;
27540+ /* if tboot is in use, allow access to its hardcoded serial log range */
27541+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
27542+ return 1;
27543+#else
27544+ if (!pagenr)
27545+ return 1;
27546+#ifdef CONFIG_VM86
27547+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
27548+ return 1;
27549+#endif
27550+#endif
27551+
27552+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
27553+ return 1;
27554+#ifdef CONFIG_GRKERNSEC_KMEM
27555+ /* throw out everything else below 1MB */
27556+ if (pagenr <= 256)
27557+ return 0;
27558+#endif
27559 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
27560 return 0;
27561 if (!page_is_ram(pagenr))
27562@@ -381,8 +414,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
27563 #endif
27564 }
27565
27566+#ifdef CONFIG_GRKERNSEC_KMEM
27567+static inline void gr_init_ebda(void)
27568+{
27569+ unsigned int ebda_addr;
27570+ unsigned int ebda_size = 0;
27571+
27572+ ebda_addr = get_bios_ebda();
27573+ if (ebda_addr) {
27574+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
27575+ ebda_size <<= 10;
27576+ }
27577+ if (ebda_addr && ebda_size) {
27578+ ebda_start = ebda_addr >> PAGE_SHIFT;
27579+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
27580+ } else {
27581+ ebda_start = 0x9f000 >> PAGE_SHIFT;
27582+ ebda_end = 0xa0000 >> PAGE_SHIFT;
27583+ }
27584+}
27585+#else
27586+static inline void gr_init_ebda(void) { }
27587+#endif
27588+
27589 void free_initmem(void)
27590 {
27591+#ifdef CONFIG_PAX_KERNEXEC
27592+#ifdef CONFIG_X86_32
27593+ /* PaX: limit KERNEL_CS to actual size */
27594+ unsigned long addr, limit;
27595+ struct desc_struct d;
27596+ int cpu;
27597+#else
27598+ pgd_t *pgd;
27599+ pud_t *pud;
27600+ pmd_t *pmd;
27601+ unsigned long addr, end;
27602+#endif
27603+#endif
27604+
27605+ gr_init_ebda();
27606+
27607+#ifdef CONFIG_PAX_KERNEXEC
27608+#ifdef CONFIG_X86_32
27609+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
27610+ limit = (limit - 1UL) >> PAGE_SHIFT;
27611+
27612+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
27613+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
27614+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
27615+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
27616+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
27617+ }
27618+
27619+ /* PaX: make KERNEL_CS read-only */
27620+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
27621+ if (!paravirt_enabled())
27622+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
27623+/*
27624+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
27625+ pgd = pgd_offset_k(addr);
27626+ pud = pud_offset(pgd, addr);
27627+ pmd = pmd_offset(pud, addr);
27628+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
27629+ }
27630+*/
27631+#ifdef CONFIG_X86_PAE
27632+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
27633+/*
27634+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
27635+ pgd = pgd_offset_k(addr);
27636+ pud = pud_offset(pgd, addr);
27637+ pmd = pmd_offset(pud, addr);
27638+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
27639+ }
27640+*/
27641+#endif
27642+
27643+#ifdef CONFIG_MODULES
27644+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
27645+#endif
27646+
27647+#else
27648+ /* PaX: make kernel code/rodata read-only, rest non-executable */
27649+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
27650+ pgd = pgd_offset_k(addr);
27651+ pud = pud_offset(pgd, addr);
27652+ pmd = pmd_offset(pud, addr);
27653+ if (!pmd_present(*pmd))
27654+ continue;
27655+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
27656+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
27657+ else
27658+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
27659+ }
27660+
27661+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
27662+ end = addr + KERNEL_IMAGE_SIZE;
27663+ for (; addr < end; addr += PMD_SIZE) {
27664+ pgd = pgd_offset_k(addr);
27665+ pud = pud_offset(pgd, addr);
27666+ pmd = pmd_offset(pud, addr);
27667+ if (!pmd_present(*pmd))
27668+ continue;
27669+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
27670+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
27671+ }
27672+#endif
27673+
27674+ flush_tlb_all();
27675+#endif
27676+
27677 free_init_pages("unused kernel memory",
27678 (unsigned long)(&__init_begin),
27679 (unsigned long)(&__init_end));
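With GRKERNSEC_KMEM, devmem_is_allowed() above shrinks the readable low-memory window of /dev/mem to the BDA page, the EBDA and, when tboot is active, tboot's hardcoded serial-log range; everything else below 1 MB is refused. A quick user-space probe (needs root, and the outcome of the first two reads also depends on the firmware layout):

#include <sys/types.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static void probe(off_t phys)
{
	char buf[16];
	int fd = open("/dev/mem", O_RDONLY);

	if (fd < 0 || pread(fd, buf, sizeof(buf), phys) < 0)
		printf("%#llx: denied\n", (unsigned long long)phys);
	else
		printf("%#llx: readable\n", (unsigned long long)phys);
	if (fd >= 0)
		close(fd);
}

int main(void)
{
	probe(0x0);	/* BDA page: still allowed */
	probe(0x9f000);	/* a typical EBDA page: allowed */
	probe(0x10000);	/* other low memory: rejected with GRKERNSEC_KMEM */
	return 0;
}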
27680diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
27681index 745d66b..56bf568 100644
27682--- a/arch/x86/mm/init_32.c
27683+++ b/arch/x86/mm/init_32.c
27684@@ -73,36 +73,6 @@ static __init void *alloc_low_page(void)
27685 }
27686
27687 /*
27688- * Creates a middle page table and puts a pointer to it in the
27689- * given global directory entry. This only returns the gd entry
27690- * in non-PAE compilation mode, since the middle layer is folded.
27691- */
27692-static pmd_t * __init one_md_table_init(pgd_t *pgd)
27693-{
27694- pud_t *pud;
27695- pmd_t *pmd_table;
27696-
27697-#ifdef CONFIG_X86_PAE
27698- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
27699- if (after_bootmem)
27700- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
27701- else
27702- pmd_table = (pmd_t *)alloc_low_page();
27703- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
27704- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
27705- pud = pud_offset(pgd, 0);
27706- BUG_ON(pmd_table != pmd_offset(pud, 0));
27707-
27708- return pmd_table;
27709- }
27710-#endif
27711- pud = pud_offset(pgd, 0);
27712- pmd_table = pmd_offset(pud, 0);
27713-
27714- return pmd_table;
27715-}
27716-
27717-/*
27718 * Create a page table and place a pointer to it in a middle page
27719 * directory entry:
27720 */
27721@@ -122,13 +92,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
27722 page_table = (pte_t *)alloc_low_page();
27723
27724 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
27725+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27726+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
27727+#else
27728 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
27729+#endif
27730 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
27731 }
27732
27733 return pte_offset_kernel(pmd, 0);
27734 }
27735
27736+static pmd_t * __init one_md_table_init(pgd_t *pgd)
27737+{
27738+ pud_t *pud;
27739+ pmd_t *pmd_table;
27740+
27741+ pud = pud_offset(pgd, 0);
27742+ pmd_table = pmd_offset(pud, 0);
27743+
27744+ return pmd_table;
27745+}
27746+
27747 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
27748 {
27749 int pgd_idx = pgd_index(vaddr);
27750@@ -202,6 +187,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
27751 int pgd_idx, pmd_idx;
27752 unsigned long vaddr;
27753 pgd_t *pgd;
27754+ pud_t *pud;
27755 pmd_t *pmd;
27756 pte_t *pte = NULL;
27757
27758@@ -211,8 +197,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
27759 pgd = pgd_base + pgd_idx;
27760
27761 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
27762- pmd = one_md_table_init(pgd);
27763- pmd = pmd + pmd_index(vaddr);
27764+ pud = pud_offset(pgd, vaddr);
27765+ pmd = pmd_offset(pud, vaddr);
27766+
27767+#ifdef CONFIG_X86_PAE
27768+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
27769+#endif
27770+
27771 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
27772 pmd++, pmd_idx++) {
27773 pte = page_table_kmap_check(one_page_table_init(pmd),
27774@@ -224,11 +215,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
27775 }
27776 }
27777
27778-static inline int is_kernel_text(unsigned long addr)
27779+static inline int is_kernel_text(unsigned long start, unsigned long end)
27780 {
27781- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
27782- return 1;
27783- return 0;
27784+ if ((start > ktla_ktva((unsigned long)_etext) ||
27785+ end <= ktla_ktva((unsigned long)_stext)) &&
27786+ (start > ktla_ktva((unsigned long)_einittext) ||
27787+ end <= ktla_ktva((unsigned long)_sinittext)) &&
27788+
27789+#ifdef CONFIG_ACPI_SLEEP
27790+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
27791+#endif
27792+
27793+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
27794+ return 0;
27795+ return 1;
27796 }
27797
27798 /*
27799@@ -245,9 +245,10 @@ kernel_physical_mapping_init(unsigned long start,
27800 unsigned long last_map_addr = end;
27801 unsigned long start_pfn, end_pfn;
27802 pgd_t *pgd_base = swapper_pg_dir;
27803- int pgd_idx, pmd_idx, pte_ofs;
27804+ unsigned int pgd_idx, pmd_idx, pte_ofs;
27805 unsigned long pfn;
27806 pgd_t *pgd;
27807+ pud_t *pud;
27808 pmd_t *pmd;
27809 pte_t *pte;
27810 unsigned pages_2m, pages_4k;
27811@@ -280,8 +281,13 @@ repeat:
27812 pfn = start_pfn;
27813 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
27814 pgd = pgd_base + pgd_idx;
27815- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
27816- pmd = one_md_table_init(pgd);
27817+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
27818+ pud = pud_offset(pgd, 0);
27819+ pmd = pmd_offset(pud, 0);
27820+
27821+#ifdef CONFIG_X86_PAE
27822+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
27823+#endif
27824
27825 if (pfn >= end_pfn)
27826 continue;
27827@@ -293,14 +299,13 @@ repeat:
27828 #endif
27829 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
27830 pmd++, pmd_idx++) {
27831- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
27832+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
27833
27834 /*
27835 * Map with big pages if possible, otherwise
27836 * create normal page tables:
27837 */
27838 if (use_pse) {
27839- unsigned int addr2;
27840 pgprot_t prot = PAGE_KERNEL_LARGE;
27841 /*
27842 * first pass will use the same initial
27843@@ -310,11 +315,7 @@ repeat:
27844 __pgprot(PTE_IDENT_ATTR |
27845 _PAGE_PSE);
27846
27847- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
27848- PAGE_OFFSET + PAGE_SIZE-1;
27849-
27850- if (is_kernel_text(addr) ||
27851- is_kernel_text(addr2))
27852+ if (is_kernel_text(address, address + PMD_SIZE))
27853 prot = PAGE_KERNEL_LARGE_EXEC;
27854
27855 pages_2m++;
27856@@ -331,7 +332,7 @@ repeat:
27857 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
27858 pte += pte_ofs;
27859 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
27860- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
27861+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
27862 pgprot_t prot = PAGE_KERNEL;
27863 /*
27864 * first pass will use the same initial
27865@@ -339,7 +340,7 @@ repeat:
27866 */
27867 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
27868
27869- if (is_kernel_text(addr))
27870+ if (is_kernel_text(address, address + PAGE_SIZE))
27871 prot = PAGE_KERNEL_EXEC;
27872
27873 pages_4k++;
27874@@ -465,7 +466,7 @@ void __init native_pagetable_init(void)
27875
27876 pud = pud_offset(pgd, va);
27877 pmd = pmd_offset(pud, va);
27878- if (!pmd_present(*pmd))
27879+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
27880 break;
27881
27882 pte = pte_offset_kernel(pmd, va);
27883@@ -514,12 +515,10 @@ void __init early_ioremap_page_table_range_init(void)
27884
27885 static void __init pagetable_init(void)
27886 {
27887- pgd_t *pgd_base = swapper_pg_dir;
27888-
27889- permanent_kmaps_init(pgd_base);
27890+ permanent_kmaps_init(swapper_pg_dir);
27891 }
27892
27893-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
27894+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
27895 EXPORT_SYMBOL_GPL(__supported_pte_mask);
27896
27897 /* user-defined highmem size */
27898@@ -728,6 +727,12 @@ void __init mem_init(void)
27899
27900 pci_iommu_alloc();
27901
27902+#ifdef CONFIG_PAX_PER_CPU_PGD
27903+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
27904+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
27905+ KERNEL_PGD_PTRS);
27906+#endif
27907+
27908 #ifdef CONFIG_FLATMEM
27909 BUG_ON(!mem_map);
27910 #endif
27911@@ -754,7 +759,7 @@ void __init mem_init(void)
27912 reservedpages++;
27913
27914 codesize = (unsigned long) &_etext - (unsigned long) &_text;
27915- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
27916+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
27917 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
27918
27919 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
27920@@ -795,10 +800,10 @@ void __init mem_init(void)
27921 ((unsigned long)&__init_end -
27922 (unsigned long)&__init_begin) >> 10,
27923
27924- (unsigned long)&_etext, (unsigned long)&_edata,
27925- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
27926+ (unsigned long)&_sdata, (unsigned long)&_edata,
27927+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
27928
27929- (unsigned long)&_text, (unsigned long)&_etext,
27930+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
27931 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
27932
27933 /*
27934@@ -876,6 +881,7 @@ void set_kernel_text_rw(void)
27935 if (!kernel_set_to_readonly)
27936 return;
27937
27938+ start = ktla_ktva(start);
27939 pr_debug("Set kernel text: %lx - %lx for read write\n",
27940 start, start+size);
27941
27942@@ -890,6 +896,7 @@ void set_kernel_text_ro(void)
27943 if (!kernel_set_to_readonly)
27944 return;
27945
27946+ start = ktla_ktva(start);
27947 pr_debug("Set kernel text: %lx - %lx for read only\n",
27948 start, start+size);
27949
27950@@ -918,6 +925,7 @@ void mark_rodata_ro(void)
27951 unsigned long start = PFN_ALIGN(_text);
27952 unsigned long size = PFN_ALIGN(_etext) - start;
27953
27954+ start = ktla_ktva(start);
27955 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
27956 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
27957 size >> 10);
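The rewritten is_kernel_text() above switches from point queries to a range query: a [start, end) span counts as kernel text iff it overlaps any executable region (.text, init text, the ACPI wakeup stub, the low BIOS window), which is what the PMD-sized classification in kernel_physical_mapping_init() now needs. The condition is De Morgan over interval overlap; in generic form (region values below are illustrative):

#include <stdio.h>

struct region { unsigned long s, e; };	/* executable span */

static int is_kernel_text(unsigned long start, unsigned long end,
			  const struct region *r, int n)
{
	int i;

	for (i = 0; i < n; i++)
		if (start <= r[i].e && end > r[i].s)	/* intervals overlap */
			return 1;
	return 0;
}

int main(void)
{
	struct region exec[] = {	/* illustrative addresses */
		{ 0xc1000000UL, 0xc1400000UL },	/* _stext.._etext */
		{ 0xc1600000UL, 0xc1680000UL },	/* init text      */
	};

	printf("%d\n", is_kernel_text(0xc1200000UL, 0xc1280000UL, exec, 2)); /* 1 */
	printf("%d\n", is_kernel_text(0xc1410000UL, 0xc1500000UL, exec, 2)); /* 0 */
	return 0;
}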
27958diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
27959index 75c9a6a..498d677 100644
27960--- a/arch/x86/mm/init_64.c
27961+++ b/arch/x86/mm/init_64.c
27962@@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpages_on);
27963 * around without checking the pgd every time.
27964 */
27965
27966-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
27967+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
27968 EXPORT_SYMBOL_GPL(__supported_pte_mask);
27969
27970 int force_personality32;
27971@@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
27972
27973 for (address = start; address <= end; address += PGDIR_SIZE) {
27974 const pgd_t *pgd_ref = pgd_offset_k(address);
27975+
27976+#ifdef CONFIG_PAX_PER_CPU_PGD
27977+ unsigned long cpu;
27978+#else
27979 struct page *page;
27980+#endif
27981
27982 if (pgd_none(*pgd_ref))
27983 continue;
27984
27985 spin_lock(&pgd_lock);
27986+
27987+#ifdef CONFIG_PAX_PER_CPU_PGD
27988+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
27989+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
27990+#else
27991 list_for_each_entry(page, &pgd_list, lru) {
27992 pgd_t *pgd;
27993 spinlock_t *pgt_lock;
27994@@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
27995 /* the pgt_lock only for Xen */
27996 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
27997 spin_lock(pgt_lock);
27998+#endif
27999
28000 if (pgd_none(*pgd))
28001 set_pgd(pgd, *pgd_ref);
28002@@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28003 BUG_ON(pgd_page_vaddr(*pgd)
28004 != pgd_page_vaddr(*pgd_ref));
28005
28006+#ifndef CONFIG_PAX_PER_CPU_PGD
28007 spin_unlock(pgt_lock);
28008+#endif
28009+
28010 }
28011 spin_unlock(&pgd_lock);
28012 }
28013@@ -161,7 +175,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
28014 {
28015 if (pgd_none(*pgd)) {
28016 pud_t *pud = (pud_t *)spp_getpage();
28017- pgd_populate(&init_mm, pgd, pud);
28018+ pgd_populate_kernel(&init_mm, pgd, pud);
28019 if (pud != pud_offset(pgd, 0))
28020 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
28021 pud, pud_offset(pgd, 0));
28022@@ -173,7 +187,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
28023 {
28024 if (pud_none(*pud)) {
28025 pmd_t *pmd = (pmd_t *) spp_getpage();
28026- pud_populate(&init_mm, pud, pmd);
28027+ pud_populate_kernel(&init_mm, pud, pmd);
28028 if (pmd != pmd_offset(pud, 0))
28029 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
28030 pmd, pmd_offset(pud, 0));
28031@@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
28032 pmd = fill_pmd(pud, vaddr);
28033 pte = fill_pte(pmd, vaddr);
28034
28035+ pax_open_kernel();
28036 set_pte(pte, new_pte);
28037+ pax_close_kernel();
28038
28039 /*
28040 * It's enough to flush this one mapping.
28041@@ -261,14 +277,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
28042 pgd = pgd_offset_k((unsigned long)__va(phys));
28043 if (pgd_none(*pgd)) {
28044 pud = (pud_t *) spp_getpage();
28045- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
28046- _PAGE_USER));
28047+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
28048 }
28049 pud = pud_offset(pgd, (unsigned long)__va(phys));
28050 if (pud_none(*pud)) {
28051 pmd = (pmd_t *) spp_getpage();
28052- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
28053- _PAGE_USER));
28054+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
28055 }
28056 pmd = pmd_offset(pud, phys);
28057 BUG_ON(!pmd_none(*pmd));
28058@@ -329,7 +343,7 @@ static __ref void *alloc_low_page(unsigned long *phys)
28059 if (pfn >= pgt_buf_top)
28060 panic("alloc_low_page: ran out of memory");
28061
28062- adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
28063+ adr = (void __force_kernel *)early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
28064 clear_page(adr);
28065 *phys = pfn * PAGE_SIZE;
28066 return adr;
28067@@ -345,7 +359,7 @@ static __ref void *map_low_page(void *virt)
28068
28069 phys = __pa(virt);
28070 left = phys & (PAGE_SIZE - 1);
28071- adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
28072+ adr = (void __force_kernel *)early_memremap(phys & PAGE_MASK, PAGE_SIZE);
28073 adr = (void *)(((unsigned long)adr) | left);
28074
28075 return adr;
28076@@ -553,7 +567,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
28077 unmap_low_page(pmd);
28078
28079 spin_lock(&init_mm.page_table_lock);
28080- pud_populate(&init_mm, pud, __va(pmd_phys));
28081+ pud_populate_kernel(&init_mm, pud, __va(pmd_phys));
28082 spin_unlock(&init_mm.page_table_lock);
28083 }
28084 __flush_tlb_all();
28085@@ -599,7 +613,7 @@ kernel_physical_mapping_init(unsigned long start,
28086 unmap_low_page(pud);
28087
28088 spin_lock(&init_mm.page_table_lock);
28089- pgd_populate(&init_mm, pgd, __va(pud_phys));
28090+ pgd_populate_kernel(&init_mm, pgd, __va(pud_phys));
28091 spin_unlock(&init_mm.page_table_lock);
28092 pgd_changed = true;
28093 }
28094@@ -693,6 +707,12 @@ void __init mem_init(void)
28095
28096 pci_iommu_alloc();
28097
28098+#ifdef CONFIG_PAX_PER_CPU_PGD
28099+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
28100+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28101+ KERNEL_PGD_PTRS);
28102+#endif
28103+
28104 /* clear_bss() already clear the empty_zero_page */
28105
28106 reservedpages = 0;
28107@@ -856,8 +876,8 @@ int kern_addr_valid(unsigned long addr)
28108 static struct vm_area_struct gate_vma = {
28109 .vm_start = VSYSCALL_START,
28110 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
28111- .vm_page_prot = PAGE_READONLY_EXEC,
28112- .vm_flags = VM_READ | VM_EXEC
28113+ .vm_page_prot = PAGE_READONLY,
28114+ .vm_flags = VM_READ
28115 };
28116
28117 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
28118@@ -891,7 +911,7 @@ int in_gate_area_no_mm(unsigned long addr)
28119
28120 const char *arch_vma_name(struct vm_area_struct *vma)
28121 {
28122- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
28123+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
28124 return "[vdso]";
28125 if (vma == &gate_vma)
28126 return "[vsyscall]";
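Both mem_init() hunks seed the kernel half of every per-CPU page directory from swapper_pg_dir, because with PAX_PER_CPU_PGD top-level tables are no longer shared through pgd_list; later updates are propagated by the modified sync_global_pgds(), which walks pgd_offset_cpu() per CPU instead of the pgd list. clone_pgd_range() itself is the stock helper from arch/x86/include/asm/pgtable.h, nothing more than a copy of top-level entries:

static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
{
	memcpy(dst, src, count * sizeof(pgd_t));
}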
28127diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
28128index 7b179b4..6bd1777 100644
28129--- a/arch/x86/mm/iomap_32.c
28130+++ b/arch/x86/mm/iomap_32.c
28131@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
28132 type = kmap_atomic_idx_push();
28133 idx = type + KM_TYPE_NR * smp_processor_id();
28134 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
28135+
28136+ pax_open_kernel();
28137 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
28138+ pax_close_kernel();
28139+
28140 arch_flush_lazy_mmu_mode();
28141
28142 return (void *)vaddr;
28143diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
28144index 78fe3f1..2f9433c 100644
28145--- a/arch/x86/mm/ioremap.c
28146+++ b/arch/x86/mm/ioremap.c
28147@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
28148 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
28149 int is_ram = page_is_ram(pfn);
28150
28151- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
28152+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
28153 return NULL;
28154 WARN_ON_ONCE(is_ram);
28155 }
28156@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
28157 *
28158 * Caller must ensure there is only one unmapping for the same pointer.
28159 */
28160-void iounmap(volatile void __iomem *addr)
28161+void iounmap(const volatile void __iomem *addr)
28162 {
28163 struct vm_struct *p, *o;
28164
28165@@ -315,6 +315,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
28166
28167 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
28168 if (page_is_ram(start >> PAGE_SHIFT))
28169+#ifdef CONFIG_HIGHMEM
28170+ if ((start >> PAGE_SHIFT) < max_low_pfn)
28171+#endif
28172 return __va(phys);
28173
28174 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
28175@@ -344,7 +347,7 @@ static int __init early_ioremap_debug_setup(char *str)
28176 early_param("early_ioremap_debug", early_ioremap_debug_setup);
28177
28178 static __initdata int after_paging_init;
28179-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
28180+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
28181
28182 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
28183 {
28184@@ -381,8 +384,7 @@ void __init early_ioremap_init(void)
28185 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
28186
28187 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
28188- memset(bm_pte, 0, sizeof(bm_pte));
28189- pmd_populate_kernel(&init_mm, pmd, bm_pte);
28190+ pmd_populate_user(&init_mm, pmd, bm_pte);
28191
28192 /*
28193 * The boot-ioremap range spans multiple pmds, for which
28194diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
28195index d87dd6d..bf3fa66 100644
28196--- a/arch/x86/mm/kmemcheck/kmemcheck.c
28197+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
28198@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
28199 * memory (e.g. tracked pages)? For now, we need this to avoid
28200 * invoking kmemcheck for PnP BIOS calls.
28201 */
28202- if (regs->flags & X86_VM_MASK)
28203+ if (v8086_mode(regs))
28204 return false;
28205- if (regs->cs != __KERNEL_CS)
28206+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
28207 return false;
28208
28209 pte = kmemcheck_pte_lookup(address);
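The kmemcheck change swaps the open-coded X86_VM_MASK test for the stock v8086_mode() helper (identical on 32-bit, and correctly constant-false on 64-bit, where there is no virtual-8086 mode) and additionally accepts KERNEXEC's alternate kernel code segment. For reference, the upstream helper from arch/x86/include/asm/ptrace.h:

static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->flags & X86_VM_MASK);
#else
	return 0;	/* no V8086 mode in long mode */
#endif
}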
28210diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
28211index 845df68..1d8d29f 100644
28212--- a/arch/x86/mm/mmap.c
28213+++ b/arch/x86/mm/mmap.c
28214@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
28215 * Leave an at least ~128 MB hole with possible stack randomization.
28216 */
28217 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
28218-#define MAX_GAP (TASK_SIZE/6*5)
28219+#define MAX_GAP (pax_task_size/6*5)
28220
28221 static int mmap_is_legacy(void)
28222 {
28223@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
28224 return rnd << PAGE_SHIFT;
28225 }
28226
28227-static unsigned long mmap_base(void)
28228+static unsigned long mmap_base(struct mm_struct *mm)
28229 {
28230 unsigned long gap = rlimit(RLIMIT_STACK);
28231+ unsigned long pax_task_size = TASK_SIZE;
28232+
28233+#ifdef CONFIG_PAX_SEGMEXEC
28234+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28235+ pax_task_size = SEGMEXEC_TASK_SIZE;
28236+#endif
28237
28238 if (gap < MIN_GAP)
28239 gap = MIN_GAP;
28240 else if (gap > MAX_GAP)
28241 gap = MAX_GAP;
28242
28243- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
28244+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
28245 }
28246
28247 /*
28248 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
28249 * does, but not when emulating X86_32
28250 */
28251-static unsigned long mmap_legacy_base(void)
28252+static unsigned long mmap_legacy_base(struct mm_struct *mm)
28253 {
28254- if (mmap_is_ia32())
28255+ if (mmap_is_ia32()) {
28256+
28257+#ifdef CONFIG_PAX_SEGMEXEC
28258+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28259+ return SEGMEXEC_TASK_UNMAPPED_BASE;
28260+ else
28261+#endif
28262+
28263 return TASK_UNMAPPED_BASE;
28264- else
28265+ } else
28266 return TASK_UNMAPPED_BASE + mmap_rnd();
28267 }
28268
28269@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
28270 void arch_pick_mmap_layout(struct mm_struct *mm)
28271 {
28272 if (mmap_is_legacy()) {
28273- mm->mmap_base = mmap_legacy_base();
28274+ mm->mmap_base = mmap_legacy_base(mm);
28275+
28276+#ifdef CONFIG_PAX_RANDMMAP
28277+ if (mm->pax_flags & MF_PAX_RANDMMAP)
28278+ mm->mmap_base += mm->delta_mmap;
28279+#endif
28280+
28281 mm->get_unmapped_area = arch_get_unmapped_area;
28282 mm->unmap_area = arch_unmap_area;
28283 } else {
28284- mm->mmap_base = mmap_base();
28285+ mm->mmap_base = mmap_base(mm);
28286+
28287+#ifdef CONFIG_PAX_RANDMMAP
28288+ if (mm->pax_flags & MF_PAX_RANDMMAP)
28289+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
28290+#endif
28291+
28292 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
28293 mm->unmap_area = arch_unmap_area_topdown;
28294 }
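The mmap.c rework threads the mm through so the gap clamp can use the SEGMEXEC-reduced task size; RANDMMAP then adds delta_mmap to the legacy base or subtracts delta_mmap + delta_stack from the top-down base. A worked example of the clamp with 64-bit values and no SEGMEXEC (MIN_GAP also includes stack_maxrandom_size() in the real code; rnd stands in for mmap_rnd()):

#include <stdio.h>

#define TASK_SIZE	0x7ffffffff000UL
#define MIN_GAP		(128 * 1024 * 1024UL)
#define MAX_GAP		(TASK_SIZE / 6 * 5)
#define PAGE_ALIGN(x)	(((x) + 0xfffUL) & ~0xfffUL)

static unsigned long mmap_base(unsigned long stack_rlimit, unsigned long rnd)
{
	unsigned long gap = stack_rlimit;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;
	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
}

int main(void)
{
	/* RLIMIT_STACK = 8 MB clamps to MIN_GAP: the base lands ~128 MB
	 * plus the randomization below the top of the address space */
	printf("%#lx\n", mmap_base(8 * 1024 * 1024UL, 0x1000000UL));
	return 0;
}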
28295diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
28296index dc0b727..f612039 100644
28297--- a/arch/x86/mm/mmio-mod.c
28298+++ b/arch/x86/mm/mmio-mod.c
28299@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
28300 break;
28301 default:
28302 {
28303- unsigned char *ip = (unsigned char *)instptr;
28304+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
28305 my_trace->opcode = MMIO_UNKNOWN_OP;
28306 my_trace->width = 0;
28307 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
28308@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
28309 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
28310 void __iomem *addr)
28311 {
28312- static atomic_t next_id;
28313+ static atomic_unchecked_t next_id;
28314 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
28315 /* These are page-unaligned. */
28316 struct mmiotrace_map map = {
28317@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
28318 .private = trace
28319 },
28320 .phys = offset,
28321- .id = atomic_inc_return(&next_id)
28322+ .id = atomic_inc_return_unchecked(&next_id)
28323 };
28324 map.map_id = trace->id;
28325
28326@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
28327 ioremap_trace_core(offset, size, addr);
28328 }
28329
28330-static void iounmap_trace_core(volatile void __iomem *addr)
28331+static void iounmap_trace_core(const volatile void __iomem *addr)
28332 {
28333 struct mmiotrace_map map = {
28334 .phys = 0,
28335@@ -328,7 +328,7 @@ not_enabled:
28336 }
28337 }
28338
28339-void mmiotrace_iounmap(volatile void __iomem *addr)
28340+void mmiotrace_iounmap(const volatile void __iomem *addr)
28341 {
28342 might_sleep();
28343 if (is_enabled()) /* recheck and proper locking in *_core() */
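next_id in mmio-mod.c is a pure trace-id allocator whose wraparound is harmless, so it becomes atomic_unchecked_t: the grsecurity counterpart of atomic_t that stays exempt when PAX_REFCOUNT turns signed overflow of ordinary atomics into a trap. A simplified user-space rendering of the pair's intent (the kernel implements this in asm, not with the GCC builtin used here):

typedef struct { int counter; } atomic_unchecked_t;

static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	/* plain wrapping increment, no overflow trap */
	return __sync_add_and_fetch(&v->counter, 1);
}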
28344diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
28345index b008656..773eac2 100644
28346--- a/arch/x86/mm/pageattr-test.c
28347+++ b/arch/x86/mm/pageattr-test.c
28348@@ -36,7 +36,7 @@ enum {
28349
28350 static int pte_testbit(pte_t pte)
28351 {
28352- return pte_flags(pte) & _PAGE_UNUSED1;
28353+ return pte_flags(pte) & _PAGE_CPA_TEST;
28354 }
28355
28356 struct split_state {
28357diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
28358index a718e0d..77419bc 100644
28359--- a/arch/x86/mm/pageattr.c
28360+++ b/arch/x86/mm/pageattr.c
28361@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28362 */
28363 #ifdef CONFIG_PCI_BIOS
28364 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
28365- pgprot_val(forbidden) |= _PAGE_NX;
28366+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28367 #endif
28368
28369 /*
28370@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28371 * Does not cover __inittext since that is gone later on. On
28372 * 64bit we do not enforce !NX on the low mapping
28373 */
28374- if (within(address, (unsigned long)_text, (unsigned long)_etext))
28375- pgprot_val(forbidden) |= _PAGE_NX;
28376+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
28377+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28378
28379+#ifdef CONFIG_DEBUG_RODATA
28380 /*
28381 * The .rodata section needs to be read-only. Using the pfn
28382 * catches all aliases.
28383@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28384 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
28385 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
28386 pgprot_val(forbidden) |= _PAGE_RW;
28387+#endif
28388
28389 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
28390 /*
28391@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28392 }
28393 #endif
28394
28395+#ifdef CONFIG_PAX_KERNEXEC
28396+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
28397+ pgprot_val(forbidden) |= _PAGE_RW;
28398+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28399+ }
28400+#endif
28401+
28402 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
28403
28404 return prot;
28405@@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
28406 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
28407 {
28408 /* change init_mm */
28409+ pax_open_kernel();
28410 set_pte_atomic(kpte, pte);
28411+
28412 #ifdef CONFIG_X86_32
28413 if (!SHARED_KERNEL_PMD) {
28414+
28415+#ifdef CONFIG_PAX_PER_CPU_PGD
28416+ unsigned long cpu;
28417+#else
28418 struct page *page;
28419+#endif
28420
28421+#ifdef CONFIG_PAX_PER_CPU_PGD
28422+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
28423+ pgd_t *pgd = get_cpu_pgd(cpu);
28424+#else
28425 list_for_each_entry(page, &pgd_list, lru) {
28426- pgd_t *pgd;
28427+ pgd_t *pgd = (pgd_t *)page_address(page);
28428+#endif
28429+
28430 pud_t *pud;
28431 pmd_t *pmd;
28432
28433- pgd = (pgd_t *)page_address(page) + pgd_index(address);
28434+ pgd += pgd_index(address);
28435 pud = pud_offset(pgd, address);
28436 pmd = pmd_offset(pud, address);
28437 set_pte_atomic((pte_t *)pmd, pte);
28438 }
28439 }
28440 #endif
28441+ pax_close_kernel();
28442 }
28443
28444 static int
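static_protections() accumulates forbidden bits and strips them from whatever the caller requested; the KERNEXEC hunk above adds the rule that pfns covering [_text, _sdata) may never gain _PAGE_RW or lose executability, no matter what set_memory_*() asked for. The mechanism in isolation (bit values are illustrative; a 64-bit host is assumed for the NX bit):

#include <stdio.h>

#define _PAGE_RW	0x002UL
#define _PAGE_NX	(1UL << 63)

static unsigned long apply_protections(unsigned long req, int pfn_in_ktext)
{
	unsigned long forbidden = 0;

	if (pfn_in_ktext) {
		forbidden |= _PAGE_RW;	/* text may never become writable */
		forbidden |= _PAGE_NX;	/* ...nor non-executable */
	}
	return req & ~forbidden;
}

int main(void)
{
	/* a request for RW+NX on a kernel-text pfn loses both bits */
	printf("%#lx\n", apply_protections(_PAGE_RW | _PAGE_NX, 1));
	return 0;
}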
28445diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
28446index 0eb572e..92f5c1e 100644
28447--- a/arch/x86/mm/pat.c
28448+++ b/arch/x86/mm/pat.c
28449@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
28450
28451 if (!entry) {
28452 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
28453- current->comm, current->pid, start, end - 1);
28454+ current->comm, task_pid_nr(current), start, end - 1);
28455 return -EINVAL;
28456 }
28457
28458@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28459
28460 while (cursor < to) {
28461 if (!devmem_is_allowed(pfn)) {
28462- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
28463- current->comm, from, to - 1);
28464+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
28465+ current->comm, from, to - 1, cursor);
28466 return 0;
28467 }
28468 cursor += PAGE_SIZE;
28469@@ -570,7 +570,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
28470 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
28471 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
28472 "for [mem %#010Lx-%#010Lx]\n",
28473- current->comm, current->pid,
28474+ current->comm, task_pid_nr(current),
28475 cattr_name(flags),
28476 base, (unsigned long long)(base + size-1));
28477 return -EINVAL;
28478@@ -605,7 +605,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
28479 flags = lookup_memtype(paddr);
28480 if (want_flags != flags) {
28481 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
28482- current->comm, current->pid,
28483+ current->comm, task_pid_nr(current),
28484 cattr_name(want_flags),
28485 (unsigned long long)paddr,
28486 (unsigned long long)(paddr + size - 1),
28487@@ -627,7 +627,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
28488 free_memtype(paddr, paddr + size);
28489 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
28490 " for [mem %#010Lx-%#010Lx], got %s\n",
28491- current->comm, current->pid,
28492+ current->comm, task_pid_nr(current),
28493 cattr_name(want_flags),
28494 (unsigned long long)paddr,
28495 (unsigned long long)(paddr + size - 1),
28496diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
28497index 9f0614d..92ae64a 100644
28498--- a/arch/x86/mm/pf_in.c
28499+++ b/arch/x86/mm/pf_in.c
28500@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
28501 int i;
28502 enum reason_type rv = OTHERS;
28503
28504- p = (unsigned char *)ins_addr;
28505+ p = (unsigned char *)ktla_ktva(ins_addr);
28506 p += skip_prefix(p, &prf);
28507 p += get_opcode(p, &opcode);
28508
28509@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
28510 struct prefix_bits prf;
28511 int i;
28512
28513- p = (unsigned char *)ins_addr;
28514+ p = (unsigned char *)ktla_ktva(ins_addr);
28515 p += skip_prefix(p, &prf);
28516 p += get_opcode(p, &opcode);
28517
28518@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
28519 struct prefix_bits prf;
28520 int i;
28521
28522- p = (unsigned char *)ins_addr;
28523+ p = (unsigned char *)ktla_ktva(ins_addr);
28524 p += skip_prefix(p, &prf);
28525 p += get_opcode(p, &opcode);
28526
28527@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
28528 struct prefix_bits prf;
28529 int i;
28530
28531- p = (unsigned char *)ins_addr;
28532+ p = (unsigned char *)ktla_ktva(ins_addr);
28533 p += skip_prefix(p, &prf);
28534 p += get_opcode(p, &opcode);
28535 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
28536@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
28537 struct prefix_bits prf;
28538 int i;
28539
28540- p = (unsigned char *)ins_addr;
28541+ p = (unsigned char *)ktla_ktva(ins_addr);
28542 p += skip_prefix(p, &prf);
28543 p += get_opcode(p, &opcode);
28544 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
28545diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
28546index e27fbf8..8b56dc9 100644
28547--- a/arch/x86/mm/pgtable.c
28548+++ b/arch/x86/mm/pgtable.c
28549@@ -84,10 +84,64 @@ static inline void pgd_list_del(pgd_t *pgd)
28550 list_del(&page->lru);
28551 }
28552
28553-#define UNSHARED_PTRS_PER_PGD \
28554- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
28555+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28556+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
28557
28558+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
28559+{
28560+ unsigned int count = USER_PGD_PTRS;
28561
28562+ while (count--)
28563+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
28564+}
28565+#endif
28566+
28567+#ifdef CONFIG_PAX_PER_CPU_PGD
28568+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
28569+{
28570+ unsigned int count = USER_PGD_PTRS;
28571+
28572+ while (count--) {
28573+ pgd_t pgd;
28574+
28575+#ifdef CONFIG_X86_64
28576+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
28577+#else
28578+ pgd = *src++;
28579+#endif
28580+
28581+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
28582+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
28583+#endif
28584+
28585+ *dst++ = pgd;
28586+ }
28587+
28588+}
28589+#endif
28590+
28591+#ifdef CONFIG_X86_64
28592+#define pxd_t pud_t
28593+#define pyd_t pgd_t
28594+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
28595+#define pxd_free(mm, pud) pud_free((mm), (pud))
28596+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
28597+#define pyd_offset(mm, address) pgd_offset((mm), (address))
28598+#define PYD_SIZE PGDIR_SIZE
28599+#else
28600+#define pxd_t pmd_t
28601+#define pyd_t pud_t
28602+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
28603+#define pxd_free(mm, pud) pmd_free((mm), (pud))
28604+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
28605+#define pyd_offset(mm, address) pud_offset((mm), (address))
28606+#define PYD_SIZE PUD_SIZE
28607+#endif
28608+
28609+#ifdef CONFIG_PAX_PER_CPU_PGD
28610+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
28611+static inline void pgd_dtor(pgd_t *pgd) {}
28612+#else
28613 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
28614 {
28615 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
28616@@ -128,6 +182,7 @@ static void pgd_dtor(pgd_t *pgd)
28617 pgd_list_del(pgd);
28618 spin_unlock(&pgd_lock);
28619 }
28620+#endif
28621
28622 /*
28623 * List of all pgd's needed for non-PAE so it can invalidate entries
28624@@ -140,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
28625 * -- nyc
28626 */
28627
28628-#ifdef CONFIG_X86_PAE
28629+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
28630 /*
28631 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
28632 * updating the top-level pagetable entries to guarantee the
28633@@ -152,7 +207,7 @@ static void pgd_dtor(pgd_t *pgd)
28634 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
28635 * and initialize the kernel pmds here.
28636 */
28637-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
28638+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
28639
28640 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
28641 {
28642@@ -170,36 +225,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
28643 */
28644 flush_tlb_mm(mm);
28645 }
28646+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
28647+#define PREALLOCATED_PXDS USER_PGD_PTRS
28648 #else /* !CONFIG_X86_PAE */
28649
28650 /* No need to prepopulate any pagetable entries in non-PAE modes. */
28651-#define PREALLOCATED_PMDS 0
28652+#define PREALLOCATED_PXDS 0
28653
28654 #endif /* CONFIG_X86_PAE */
28655
28656-static void free_pmds(pmd_t *pmds[])
28657+static void free_pxds(pxd_t *pxds[])
28658 {
28659 int i;
28660
28661- for(i = 0; i < PREALLOCATED_PMDS; i++)
28662- if (pmds[i])
28663- free_page((unsigned long)pmds[i]);
28664+ for(i = 0; i < PREALLOCATED_PXDS; i++)
28665+ if (pxds[i])
28666+ free_page((unsigned long)pxds[i]);
28667 }
28668
28669-static int preallocate_pmds(pmd_t *pmds[])
28670+static int preallocate_pxds(pxd_t *pxds[])
28671 {
28672 int i;
28673 bool failed = false;
28674
28675- for(i = 0; i < PREALLOCATED_PMDS; i++) {
28676- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
28677- if (pmd == NULL)
28678+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
28679+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
28680+ if (pxd == NULL)
28681 failed = true;
28682- pmds[i] = pmd;
28683+ pxds[i] = pxd;
28684 }
28685
28686 if (failed) {
28687- free_pmds(pmds);
28688+ free_pxds(pxds);
28689 return -ENOMEM;
28690 }
28691
28692@@ -212,51 +269,55 @@ static int preallocate_pmds(pmd_t *pmds[])
28693 * preallocate which never got a corresponding vma will need to be
28694 * freed manually.
28695 */
28696-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
28697+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
28698 {
28699 int i;
28700
28701- for(i = 0; i < PREALLOCATED_PMDS; i++) {
28702+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
28703 pgd_t pgd = pgdp[i];
28704
28705 if (pgd_val(pgd) != 0) {
28706- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
28707+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
28708
28709- pgdp[i] = native_make_pgd(0);
28710+ set_pgd(pgdp + i, native_make_pgd(0));
28711
28712- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
28713- pmd_free(mm, pmd);
28714+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
28715+ pxd_free(mm, pxd);
28716 }
28717 }
28718 }
28719
28720-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
28721+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
28722 {
28723- pud_t *pud;
28724+ pyd_t *pyd;
28725 unsigned long addr;
28726 int i;
28727
28728- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
28729+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
28730 return;
28731
28732- pud = pud_offset(pgd, 0);
28733+#ifdef CONFIG_X86_64
28734+ pyd = pyd_offset(mm, 0L);
28735+#else
28736+ pyd = pyd_offset(pgd, 0L);
28737+#endif
28738
28739- for (addr = i = 0; i < PREALLOCATED_PMDS;
28740- i++, pud++, addr += PUD_SIZE) {
28741- pmd_t *pmd = pmds[i];
28742+ for (addr = i = 0; i < PREALLOCATED_PXDS;
28743+ i++, pyd++, addr += PYD_SIZE) {
28744+ pxd_t *pxd = pxds[i];
28745
28746 if (i >= KERNEL_PGD_BOUNDARY)
28747- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
28748- sizeof(pmd_t) * PTRS_PER_PMD);
28749+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
28750+ sizeof(pxd_t) * PTRS_PER_PMD);
28751
28752- pud_populate(mm, pud, pmd);
28753+ pyd_populate(mm, pyd, pxd);
28754 }
28755 }
28756
28757 pgd_t *pgd_alloc(struct mm_struct *mm)
28758 {
28759 pgd_t *pgd;
28760- pmd_t *pmds[PREALLOCATED_PMDS];
28761+ pxd_t *pxds[PREALLOCATED_PXDS];
28762
28763 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
28764
28765@@ -265,11 +326,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
28766
28767 mm->pgd = pgd;
28768
28769- if (preallocate_pmds(pmds) != 0)
28770+ if (preallocate_pxds(pxds) != 0)
28771 goto out_free_pgd;
28772
28773 if (paravirt_pgd_alloc(mm) != 0)
28774- goto out_free_pmds;
28775+ goto out_free_pxds;
28776
28777 /*
28778 * Make sure that pre-populating the pmds is atomic with
28779@@ -279,14 +340,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
28780 spin_lock(&pgd_lock);
28781
28782 pgd_ctor(mm, pgd);
28783- pgd_prepopulate_pmd(mm, pgd, pmds);
28784+ pgd_prepopulate_pxd(mm, pgd, pxds);
28785
28786 spin_unlock(&pgd_lock);
28787
28788 return pgd;
28789
28790-out_free_pmds:
28791- free_pmds(pmds);
28792+out_free_pxds:
28793+ free_pxds(pxds);
28794 out_free_pgd:
28795 free_page((unsigned long)pgd);
28796 out:
28797@@ -295,7 +356,7 @@ out:
28798
28799 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
28800 {
28801- pgd_mop_up_pmds(mm, pgd);
28802+ pgd_mop_up_pxds(mm, pgd);
28803 pgd_dtor(pgd);
28804 paravirt_pgd_free(mm, pgd);
28805 free_page((unsigned long)pgd);
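The pgtable.c rework renames the preallocation machinery to be level-generic: pxd_t is the level being preallocated and pyd_t its parent, so one body serves both 32-bit PAE (pmd under pud, PYD_SIZE = PUD_SIZE) and the 64-bit PAX_PER_CPU_PGD case (pud under pgd, PYD_SIZE = PGDIR_SIZE), where the user half of each pgd gets its pud pages preallocated. Hand-expanding the 64-bit instantiation of the populate loop makes this concrete (an illustrative rendering, not patch code):

static void pgd_prepopulate_pud64(struct mm_struct *mm, pgd_t *pgd_base,
				  pud_t *puds[])
{
	pgd_t *pgd = pgd_offset(mm, 0L);
	unsigned long addr;
	int i;

	/* USER_PGD_PTRS entries all lie below KERNEL_PGD_BOUNDARY, so the
	 * swapper memcpy branch of the generic code never fires here */
	for (addr = i = 0; i < USER_PGD_PTRS; i++, pgd++, addr += PGDIR_SIZE)
		pgd_populate(mm, pgd, puds[i]);
}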
28806diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
28807index a69bcb8..19068ab 100644
28808--- a/arch/x86/mm/pgtable_32.c
28809+++ b/arch/x86/mm/pgtable_32.c
28810@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
28811 return;
28812 }
28813 pte = pte_offset_kernel(pmd, vaddr);
28814+
28815+ pax_open_kernel();
28816 if (pte_val(pteval))
28817 set_pte_at(&init_mm, vaddr, pte, pteval);
28818 else
28819 pte_clear(&init_mm, vaddr, pte);
28820+ pax_close_kernel();
28821
28822 /*
28823 * It's enough to flush this one mapping.
28824diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
28825index 410531d..0f16030 100644
28826--- a/arch/x86/mm/setup_nx.c
28827+++ b/arch/x86/mm/setup_nx.c
28828@@ -5,8 +5,10 @@
28829 #include <asm/pgtable.h>
28830 #include <asm/proto.h>
28831
28832+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
28833 static int disable_nx __cpuinitdata;
28834
28835+#ifndef CONFIG_PAX_PAGEEXEC
28836 /*
28837 * noexec = on|off
28838 *
28839@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
28840 return 0;
28841 }
28842 early_param("noexec", noexec_setup);
28843+#endif
28844+
28845+#endif
28846
28847 void __cpuinit x86_configure_nx(void)
28848 {
28849+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
28850 if (cpu_has_nx && !disable_nx)
28851 __supported_pte_mask |= _PAGE_NX;
28852 else
28853+#endif
28854 __supported_pte_mask &= ~_PAGE_NX;
28855 }
28856
28857diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
28858index 13a6b29..c2fff23 100644
28859--- a/arch/x86/mm/tlb.c
28860+++ b/arch/x86/mm/tlb.c
28861@@ -48,7 +48,11 @@ void leave_mm(int cpu)
28862 BUG();
28863 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
28864 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
28865+
28866+#ifndef CONFIG_PAX_PER_CPU_PGD
28867 load_cr3(swapper_pg_dir);
28868+#endif
28869+
28870 }
28871 }
28872 EXPORT_SYMBOL_GPL(leave_mm);
28873diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
28874index 877b9a1..a8ecf42 100644
28875--- a/arch/x86/net/bpf_jit.S
28876+++ b/arch/x86/net/bpf_jit.S
28877@@ -9,6 +9,7 @@
28878 */
28879 #include <linux/linkage.h>
28880 #include <asm/dwarf2.h>
28881+#include <asm/alternative-asm.h>
28882
28883 /*
28884 * Calling convention :
28885@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
28886 jle bpf_slow_path_word
28887 mov (SKBDATA,%rsi),%eax
28888 bswap %eax /* ntohl() */
28889+ pax_force_retaddr
28890 ret
28891
28892 sk_load_half:
28893@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
28894 jle bpf_slow_path_half
28895 movzwl (SKBDATA,%rsi),%eax
28896 rol $8,%ax # ntohs()
28897+ pax_force_retaddr
28898 ret
28899
28900 sk_load_byte:
28901@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
28902 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
28903 jle bpf_slow_path_byte
28904 movzbl (SKBDATA,%rsi),%eax
28905+ pax_force_retaddr
28906 ret
28907
28908 /**
28909@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
28910 movzbl (SKBDATA,%rsi),%ebx
28911 and $15,%bl
28912 shl $2,%bl
28913+ pax_force_retaddr
28914 ret
28915
28916 /* rsi contains offset and can be scratched */
28917@@ -109,6 +114,7 @@ bpf_slow_path_word:
28918 js bpf_error
28919 mov -12(%rbp),%eax
28920 bswap %eax
28921+ pax_force_retaddr
28922 ret
28923
28924 bpf_slow_path_half:
28925@@ -117,12 +123,14 @@ bpf_slow_path_half:
28926 mov -12(%rbp),%ax
28927 rol $8,%ax
28928 movzwl %ax,%eax
28929+ pax_force_retaddr
28930 ret
28931
28932 bpf_slow_path_byte:
28933 bpf_slow_path_common(1)
28934 js bpf_error
28935 movzbl -12(%rbp),%eax
28936+ pax_force_retaddr
28937 ret
28938
28939 bpf_slow_path_byte_msh:
28940@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
28941 and $15,%al
28942 shl $2,%al
28943 xchg %eax,%ebx
28944+ pax_force_retaddr
28945 ret
28946
28947 #define sk_negative_common(SIZE) \
28948@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
28949 sk_negative_common(4)
28950 mov (%rax), %eax
28951 bswap %eax
28952+ pax_force_retaddr
28953 ret
28954
28955 bpf_slow_path_half_neg:
28956@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
28957 mov (%rax),%ax
28958 rol $8,%ax
28959 movzwl %ax,%eax
28960+ pax_force_retaddr
28961 ret
28962
28963 bpf_slow_path_byte_neg:
28964@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
28965 .globl sk_load_byte_negative_offset
28966 sk_negative_common(1)
28967 movzbl (%rax), %eax
28968+ pax_force_retaddr
28969 ret
28970
28971 bpf_slow_path_byte_msh_neg:
28972@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
28973 and $15,%al
28974 shl $2,%al
28975 xchg %eax,%ebx
28976+ pax_force_retaddr
28977 ret
28978
28979 bpf_error:
28980@@ -197,4 +210,5 @@ bpf_error:
28981 xor %eax,%eax
28982 mov -8(%rbp),%rbx
28983 leaveq
28984+ pax_force_retaddr
28985 ret
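
Every ret in the JIT helper stubs gains a pax_force_retaddr, the same annotation KERNEXEC's gcc plugin applies to compiled code: just before returning, the saved return address is masked so it can only point into the kernel half of the address space. A rough sketch of what the bts flavour of the macro emits, written as a C comment since the exact encoding varies by configuration:

	/* conceptually, pax_force_retaddr expands to something like
	 *
	 *	btsq	$63, (%rsp)	# force the top bit of the return address
	 *	ret			# now it can only target kernel space
	 *
	 * other variants or-in a mask held in a reserved register instead.
	 */
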
28986diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
28987index d11a470..3f9adff3 100644
28988--- a/arch/x86/net/bpf_jit_comp.c
28989+++ b/arch/x86/net/bpf_jit_comp.c
28990@@ -12,6 +12,7 @@
28991 #include <linux/netdevice.h>
28992 #include <linux/filter.h>
28993 #include <linux/if_vlan.h>
28994+#include <linux/random.h>
28995
28996 /*
28997 * Conventions :
28998@@ -49,13 +50,87 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
28999 return ptr + len;
29000 }
29001
29002+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29003+#define MAX_INSTR_CODE_SIZE 96
29004+#else
29005+#define MAX_INSTR_CODE_SIZE 64
29006+#endif
29007+
29008 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
29009
29010 #define EMIT1(b1) EMIT(b1, 1)
29011 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
29012 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
29013 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
29014+
29015+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29016+/* original constant will appear in ecx */
29017+#define DILUTE_CONST_SEQUENCE(_off, _key) \
29018+do { \
29019+ /* mov ecx, randkey */ \
29020+ EMIT1(0xb9); \
29021+ EMIT(_key, 4); \
29022+ /* xor ecx, randkey ^ off */ \
29023+ EMIT2(0x81, 0xf1); \
29024+ EMIT((_key) ^ (_off), 4); \
29025+} while (0)
29026+
29027+#define EMIT1_off32(b1, _off) \
29028+do { \
29029+ switch (b1) { \
29030+ case 0x05: /* add eax, imm32 */ \
29031+ case 0x2d: /* sub eax, imm32 */ \
29032+ case 0x25: /* and eax, imm32 */ \
29033+ case 0x0d: /* or eax, imm32 */ \
29034+ case 0xb8: /* mov eax, imm32 */ \
29035+ case 0x3d: /* cmp eax, imm32 */ \
29036+ case 0xa9: /* test eax, imm32 */ \
29037+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29038+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
29039+ break; \
29040+ case 0xbb: /* mov ebx, imm32 */ \
29041+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29042+ /* mov ebx, ecx */ \
29043+ EMIT2(0x89, 0xcb); \
29044+ break; \
29045+ case 0xbe: /* mov esi, imm32 */ \
29046+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29047+ /* mov esi, ecx */ \
29048+ EMIT2(0x89, 0xce); \
29049+ break; \
29050+ case 0xe9: /* jmp rel imm32 */ \
29051+ EMIT1(b1); \
29052+ EMIT(_off, 4); \
29053+ /* prevent fall-through, we're not called if off = 0 */ \
29054+ EMIT(0xcccccccc, 4); \
29055+ EMIT(0xcccccccc, 4); \
29056+ break; \
29057+ default: \
29058+ EMIT1(b1); \
29059+ EMIT(_off, 4); \
29060+ } \
29061+} while (0)
29062+
29063+#define EMIT2_off32(b1, b2, _off) \
29064+do { \
29065+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
29066+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
29067+ EMIT(randkey, 4); \
29068+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
29069+ EMIT((_off) - randkey, 4); \
29070+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
29071+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29072+ /* imul eax, ecx */ \
29073+ EMIT3(0x0f, 0xaf, 0xc1); \
29074+ } else { \
29075+ EMIT2(b1, b2); \
29076+ EMIT(_off, 4); \
29077+ } \
29078+} while (0)
29079+#else
29080 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
29081+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
29082+#endif
29083
29084 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
29085 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
29086@@ -90,6 +165,24 @@ do { \
29087 #define X86_JBE 0x76
29088 #define X86_JA 0x77
29089
29090+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29091+#define APPEND_FLOW_VERIFY() \
29092+do { \
29093+ /* mov ecx, randkey */ \
29094+ EMIT1(0xb9); \
29095+ EMIT(randkey, 4); \
29096+ /* cmp ecx, randkey */ \
29097+ EMIT2(0x81, 0xf9); \
29098+ EMIT(randkey, 4); \
29099+ /* jz after 8 int 3s */ \
29100+ EMIT2(0x74, 0x08); \
29101+ EMIT(0xcccccccc, 4); \
29102+ EMIT(0xcccccccc, 4); \
29103+} while (0)
29104+#else
29105+#define APPEND_FLOW_VERIFY() do { } while (0)
29106+#endif
29107+
29108 #define EMIT_COND_JMP(op, offset) \
29109 do { \
29110 if (is_near(offset)) \
29111@@ -97,6 +190,7 @@ do { \
29112 else { \
29113 EMIT2(0x0f, op + 0x10); \
29114 EMIT(offset, 4); /* jxx .+off32 */ \
29115+ APPEND_FLOW_VERIFY(); \
29116 } \
29117 } while (0)
29118
29119@@ -121,12 +215,17 @@ static inline void bpf_flush_icache(void *start, void *end)
29120 set_fs(old_fs);
29121 }
29122
29123+struct bpf_jit_work {
29124+ struct work_struct work;
29125+ void *image;
29126+};
29127+
29128 #define CHOOSE_LOAD_FUNC(K, func) \
29129 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
29130
29131 void bpf_jit_compile(struct sk_filter *fp)
29132 {
29133- u8 temp[64];
29134+ u8 temp[MAX_INSTR_CODE_SIZE];
29135 u8 *prog;
29136 unsigned int proglen, oldproglen = 0;
29137 int ilen, i;
29138@@ -139,6 +238,9 @@ void bpf_jit_compile(struct sk_filter *fp)
29139 unsigned int *addrs;
29140 const struct sock_filter *filter = fp->insns;
29141 int flen = fp->len;
29142+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29143+ unsigned int randkey;
29144+#endif
29145
29146 if (!bpf_jit_enable)
29147 return;
29148@@ -147,11 +249,19 @@ void bpf_jit_compile(struct sk_filter *fp)
29149 if (addrs == NULL)
29150 return;
29151
29152+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
29153+ if (!fp->work)
29154+ goto out;
29155+
29156+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29157+ randkey = get_random_int();
29158+#endif
29159+
29160 /* Before first pass, make a rough estimation of addrs[]
29161- * each bpf instruction is translated to less than 64 bytes
29162+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
29163 */
29164 for (proglen = 0, i = 0; i < flen; i++) {
29165- proglen += 64;
29166+ proglen += MAX_INSTR_CODE_SIZE;
29167 addrs[i] = proglen;
29168 }
29169 cleanup_addr = proglen; /* epilogue address */
29170@@ -261,10 +371,8 @@ void bpf_jit_compile(struct sk_filter *fp)
29171 case BPF_S_ALU_MUL_K: /* A *= K */
29172 if (is_imm8(K))
29173 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
29174- else {
29175- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
29176- EMIT(K, 4);
29177- }
29178+ else
29179+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
29180 break;
29181 case BPF_S_ALU_DIV_X: /* A /= X; */
29182 seen |= SEEN_XREG;
29183@@ -304,13 +412,23 @@ void bpf_jit_compile(struct sk_filter *fp)
29184 break;
29185 case BPF_S_ALU_MOD_K: /* A %= K; */
29186 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
29187+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29188+ DILUTE_CONST_SEQUENCE(K, randkey);
29189+#else
29190 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
29191+#endif
29192 EMIT2(0xf7, 0xf1); /* div %ecx */
29193 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
29194 break;
29195 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
29196+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29197+ DILUTE_CONST_SEQUENCE(K, randkey);
29198+ // imul rax, rcx
29199+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
29200+#else
29201 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
29202 EMIT(K, 4);
29203+#endif
29204 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
29205 break;
29206 case BPF_S_ALU_AND_X:
29207@@ -564,8 +682,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
29208 if (is_imm8(K)) {
29209 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
29210 } else {
29211- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
29212- EMIT(K, 4);
29213+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
29214 }
29215 } else {
29216 EMIT2(0x89,0xde); /* mov %ebx,%esi */
29217@@ -648,17 +765,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29218 break;
29219 default:
29220 /* hmm, too complex filter, give up with jit compiler */
29221- goto out;
29222+ goto error;
29223 }
29224 ilen = prog - temp;
29225 if (image) {
29226 if (unlikely(proglen + ilen > oldproglen)) {
29227 pr_err("bpb_jit_compile fatal error\n");
29228- kfree(addrs);
29229- module_free(NULL, image);
29230- return;
29231+ module_free_exec(NULL, image);
29232+ goto error;
29233 }
29234+ pax_open_kernel();
29235 memcpy(image + proglen, temp, ilen);
29236+ pax_close_kernel();
29237 }
29238 proglen += ilen;
29239 addrs[i] = proglen;
29240@@ -679,11 +797,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29241 break;
29242 }
29243 if (proglen == oldproglen) {
29244- image = module_alloc(max_t(unsigned int,
29245- proglen,
29246- sizeof(struct work_struct)));
29247+ image = module_alloc_exec(proglen);
29248 if (!image)
29249- goto out;
29250+ goto error;
29251 }
29252 oldproglen = proglen;
29253 }
29254@@ -699,7 +815,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29255 bpf_flush_icache(image, image + proglen);
29256
29257 fp->bpf_func = (void *)image;
29258- }
29259+ } else
29260+error:
29261+ kfree(fp->work);
29262+
29263 out:
29264 kfree(addrs);
29265 return;
29266@@ -707,18 +826,20 @@ out:
29267
29268 static void jit_free_defer(struct work_struct *arg)
29269 {
29270- module_free(NULL, arg);
29271+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
29272+ kfree(arg);
29273 }
29274
29275 /* run from softirq, we must use a work_struct to call
29276- * module_free() from process context
29277+ * module_free_exec() from process context
29278 */
29279 void bpf_jit_free(struct sk_filter *fp)
29280 {
29281 if (fp->bpf_func != sk_run_filter) {
29282- struct work_struct *work = (struct work_struct *)fp->bpf_func;
29283+ struct work_struct *work = &fp->work->work;
29284
29285 INIT_WORK(work, jit_free_defer);
29286+ fp->work->image = fp->bpf_func;
29287 schedule_work(work);
29288 }
29289 }
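
The bpf_jit_comp.c changes implement GRKERNSEC_JIT_HARDEN, a constant-blinding pass for the BPF JIT. A filter's 32-bit immediates are attacker-chosen bytes that end up in an executable page, which is enough to smuggle in short instruction sequences; DILUTE_CONST_SEQUENCE therefore never emits a constant K directly, but a per-filter random key plus an xor that recomputes K at run time. MAX_INSTR_CODE_SIZE grows from 64 to 96 bytes to make room for the longer sequences, and APPEND_FLOW_VERIFY pads far conditional jumps with int3 traps so a misdirected jump lands on a breakpoint. A minimal standalone demonstration of why the xor identity always recovers K:

	/* sketch: the run-time computation behind DILUTE_CONST_SEQUENCE */
	static unsigned int dilute_const(unsigned int K, unsigned int key)
	{
		unsigned int ecx = key;	/* mov ecx, key     */
		ecx ^= key ^ K;		/* xor ecx, key ^ K */
		return ecx;		/* equals K for any key */
	}

The remaining hunks route the JIT image through module_alloc_exec()/module_free_exec(), wrap the final memcpy in pax_open_kernel()/pax_close_kernel() because the executable region is not writable under KERNEXEC, and add struct bpf_jit_work so the deferred free no longer reuses the image itself as a work_struct. The bpb_jit_compile typo visible in the error-message context line is upstream's and is left untouched by the patch.
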
29290diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
29291index d6aa6e8..266395a 100644
29292--- a/arch/x86/oprofile/backtrace.c
29293+++ b/arch/x86/oprofile/backtrace.c
29294@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
29295 struct stack_frame_ia32 *fp;
29296 unsigned long bytes;
29297
29298- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
29299+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
29300 if (bytes != sizeof(bufhead))
29301 return NULL;
29302
29303- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
29304+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
29305
29306 oprofile_add_trace(bufhead[0].return_address);
29307
29308@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
29309 struct stack_frame bufhead[2];
29310 unsigned long bytes;
29311
29312- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
29313+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
29314 if (bytes != sizeof(bufhead))
29315 return NULL;
29316
29317@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
29318 {
29319 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
29320
29321- if (!user_mode_vm(regs)) {
29322+ if (!user_mode(regs)) {
29323 unsigned long stack = kernel_stack_pointer(regs);
29324 if (depth)
29325 dump_trace(NULL, regs, (unsigned long *)stack, 0,
29326diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
29327index 48768df..ba9143c 100644
29328--- a/arch/x86/oprofile/nmi_int.c
29329+++ b/arch/x86/oprofile/nmi_int.c
29330@@ -23,6 +23,7 @@
29331 #include <asm/nmi.h>
29332 #include <asm/msr.h>
29333 #include <asm/apic.h>
29334+#include <asm/pgtable.h>
29335
29336 #include "op_counter.h"
29337 #include "op_x86_model.h"
29338@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
29339 if (ret)
29340 return ret;
29341
29342- if (!model->num_virt_counters)
29343- model->num_virt_counters = model->num_counters;
29344+ if (!model->num_virt_counters) {
29345+ pax_open_kernel();
29346+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
29347+ pax_close_kernel();
29348+ }
29349
29350 mux_init(ops);
29351
29352diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
29353index b2b9443..be58856 100644
29354--- a/arch/x86/oprofile/op_model_amd.c
29355+++ b/arch/x86/oprofile/op_model_amd.c
29356@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
29357 num_counters = AMD64_NUM_COUNTERS;
29358 }
29359
29360- op_amd_spec.num_counters = num_counters;
29361- op_amd_spec.num_controls = num_counters;
29362- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
29363+ pax_open_kernel();
29364+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
29365+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
29366+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
29367+ pax_close_kernel();
29368
29369 return 0;
29370 }
29371diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
29372index d90528e..0127e2b 100644
29373--- a/arch/x86/oprofile/op_model_ppro.c
29374+++ b/arch/x86/oprofile/op_model_ppro.c
29375@@ -19,6 +19,7 @@
29376 #include <asm/msr.h>
29377 #include <asm/apic.h>
29378 #include <asm/nmi.h>
29379+#include <asm/pgtable.h>
29380
29381 #include "op_x86_model.h"
29382 #include "op_counter.h"
29383@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
29384
29385 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
29386
29387- op_arch_perfmon_spec.num_counters = num_counters;
29388- op_arch_perfmon_spec.num_controls = num_counters;
29389+ pax_open_kernel();
29390+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
29391+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
29392+ pax_close_kernel();
29393 }
29394
29395 static int arch_perfmon_init(struct oprofile_operations *ignore)
29396diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
29397index 71e8a67..6a313bb 100644
29398--- a/arch/x86/oprofile/op_x86_model.h
29399+++ b/arch/x86/oprofile/op_x86_model.h
29400@@ -52,7 +52,7 @@ struct op_x86_model_spec {
29401 void (*switch_ctrl)(struct op_x86_model_spec const *model,
29402 struct op_msrs const * const msrs);
29403 #endif
29404-};
29405+} __do_const;
29406
29407 struct op_counter_config;
29408
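
The oprofile hunks show the constify side of the patch: op_x86_model.h tags struct op_x86_model_spec with __do_const, which the constify gcc plugin turns into a genuinely read-only object, and the few legitimate late writes to num_counters/num_controls/num_virt_counters are rewritten to cast away the const inside an open/close window. The resulting pattern, assuming the helpers sketched earlier:

	/* sketch: one-time initialization of a constified ops structure */
	static void init_amd_counters(unsigned int n)
	{
		pax_open_kernel();
		*(unsigned int *)&op_amd_spec.num_counters = n;
		pax_close_kernel();
	}
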
29409diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
29410index e9e6ed5..e47ae67 100644
29411--- a/arch/x86/pci/amd_bus.c
29412+++ b/arch/x86/pci/amd_bus.c
29413@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
29414 return NOTIFY_OK;
29415 }
29416
29417-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
29418+static struct notifier_block amd_cpu_notifier = {
29419 .notifier_call = amd_cpu_notify,
29420 };
29421
29422diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
29423index 372e9b8..e775a6c 100644
29424--- a/arch/x86/pci/irq.c
29425+++ b/arch/x86/pci/irq.c
29426@@ -50,7 +50,7 @@ struct irq_router {
29427 struct irq_router_handler {
29428 u16 vendor;
29429 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
29430-};
29431+} __do_const;
29432
29433 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
29434 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
29435@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
29436 return 0;
29437 }
29438
29439-static __initdata struct irq_router_handler pirq_routers[] = {
29440+static __initconst const struct irq_router_handler pirq_routers[] = {
29441 { PCI_VENDOR_ID_INTEL, intel_router_probe },
29442 { PCI_VENDOR_ID_AL, ali_router_probe },
29443 { PCI_VENDOR_ID_ITE, ite_router_probe },
29444@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
29445 static void __init pirq_find_router(struct irq_router *r)
29446 {
29447 struct irq_routing_table *rt = pirq_table;
29448- struct irq_router_handler *h;
29449+ const struct irq_router_handler *h;
29450
29451 #ifdef CONFIG_PCI_BIOS
29452 if (!rt->signature) {
29453@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
29454 return 0;
29455 }
29456
29457-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
29458+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
29459 {
29460 .callback = fix_broken_hp_bios_irq9,
29461 .ident = "HP Pavilion N5400 Series Laptop",
29462diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
29463index 6eb18c4..20d83de 100644
29464--- a/arch/x86/pci/mrst.c
29465+++ b/arch/x86/pci/mrst.c
29466@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
29467 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
29468 pci_mmcfg_late_init();
29469 pcibios_enable_irq = mrst_pci_irq_enable;
29470- pci_root_ops = pci_mrst_ops;
29471+ pax_open_kernel();
29472+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
29473+ pax_close_kernel();
29474 pci_soc_mode = 1;
29475 /* Continue with standard init */
29476 return 1;
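
pcbios.c stops calling into the BIOS32 directory through __KERNEL_CS. Under KERNEXEC the kernel code segment no longer has a zero base, so a flat-address BIOS entry point would resolve to the wrong location; the patch instead installs dedicated __PCIBIOS_CS/__PCIBIOS_DS descriptors whose base and limit are taken from, and validated against, what the BIOS actually reported, and every lcall now loads %ds explicitly for the duration of the call. Besides fixing the segment base, this confines what the BIOS service can read and write through those selectors.
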
29477diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
29478index c77b24a..c979855 100644
29479--- a/arch/x86/pci/pcbios.c
29480+++ b/arch/x86/pci/pcbios.c
29481@@ -79,7 +79,7 @@ union bios32 {
29482 static struct {
29483 unsigned long address;
29484 unsigned short segment;
29485-} bios32_indirect = { 0, __KERNEL_CS };
29486+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
29487
29488 /*
29489 * Returns the entry point for the given service, NULL on error
29490@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
29491 unsigned long length; /* %ecx */
29492 unsigned long entry; /* %edx */
29493 unsigned long flags;
29494+ struct desc_struct d, *gdt;
29495
29496 local_irq_save(flags);
29497- __asm__("lcall *(%%edi); cld"
29498+
29499+ gdt = get_cpu_gdt_table(smp_processor_id());
29500+
29501+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
29502+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
29503+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
29504+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
29505+
29506+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
29507 : "=a" (return_code),
29508 "=b" (address),
29509 "=c" (length),
29510 "=d" (entry)
29511 : "0" (service),
29512 "1" (0),
29513- "D" (&bios32_indirect));
29514+ "D" (&bios32_indirect),
29515+ "r"(__PCIBIOS_DS)
29516+ : "memory");
29517+
29518+ pax_open_kernel();
29519+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
29520+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
29521+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
29522+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
29523+ pax_close_kernel();
29524+
29525 local_irq_restore(flags);
29526
29527 switch (return_code) {
29528- case 0:
29529- return address + entry;
29530- case 0x80: /* Not present */
29531- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
29532- return 0;
29533- default: /* Shouldn't happen */
29534- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
29535- service, return_code);
29536+ case 0: {
29537+ int cpu;
29538+ unsigned char flags;
29539+
29540+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
29541+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
29542+ printk(KERN_WARNING "bios32_service: not valid\n");
29543 return 0;
29544+ }
29545+ address = address + PAGE_OFFSET;
29546+ length += 16UL; /* some BIOSs underreport this... */
29547+ flags = 4;
29548+ if (length >= 64*1024*1024) {
29549+ length >>= PAGE_SHIFT;
29550+ flags |= 8;
29551+ }
29552+
29553+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
29554+ gdt = get_cpu_gdt_table(cpu);
29555+ pack_descriptor(&d, address, length, 0x9b, flags);
29556+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
29557+ pack_descriptor(&d, address, length, 0x93, flags);
29558+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
29559+ }
29560+ return entry;
29561+ }
29562+ case 0x80: /* Not present */
29563+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
29564+ return 0;
29565+ default: /* Shouldn't happen */
29566+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
29567+ service, return_code);
29568+ return 0;
29569 }
29570 }
29571
29572 static struct {
29573 unsigned long address;
29574 unsigned short segment;
29575-} pci_indirect = { 0, __KERNEL_CS };
29576+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
29577
29578-static int pci_bios_present;
29579+static int pci_bios_present __read_only;
29580
29581 static int check_pcibios(void)
29582 {
29583@@ -131,11 +174,13 @@ static int check_pcibios(void)
29584 unsigned long flags, pcibios_entry;
29585
29586 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
29587- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
29588+ pci_indirect.address = pcibios_entry;
29589
29590 local_irq_save(flags);
29591- __asm__(
29592- "lcall *(%%edi); cld\n\t"
29593+ __asm__("movw %w6, %%ds\n\t"
29594+ "lcall *%%ss:(%%edi); cld\n\t"
29595+ "push %%ss\n\t"
29596+ "pop %%ds\n\t"
29597 "jc 1f\n\t"
29598 "xor %%ah, %%ah\n"
29599 "1:"
29600@@ -144,7 +189,8 @@ static int check_pcibios(void)
29601 "=b" (ebx),
29602 "=c" (ecx)
29603 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
29604- "D" (&pci_indirect)
29605+ "D" (&pci_indirect),
29606+ "r" (__PCIBIOS_DS)
29607 : "memory");
29608 local_irq_restore(flags);
29609
29610@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
29611
29612 switch (len) {
29613 case 1:
29614- __asm__("lcall *(%%esi); cld\n\t"
29615+ __asm__("movw %w6, %%ds\n\t"
29616+ "lcall *%%ss:(%%esi); cld\n\t"
29617+ "push %%ss\n\t"
29618+ "pop %%ds\n\t"
29619 "jc 1f\n\t"
29620 "xor %%ah, %%ah\n"
29621 "1:"
29622@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
29623 : "1" (PCIBIOS_READ_CONFIG_BYTE),
29624 "b" (bx),
29625 "D" ((long)reg),
29626- "S" (&pci_indirect));
29627+ "S" (&pci_indirect),
29628+ "r" (__PCIBIOS_DS));
29629 /*
29630 * Zero-extend the result beyond 8 bits, do not trust the
29631 * BIOS having done it:
29632@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
29633 *value &= 0xff;
29634 break;
29635 case 2:
29636- __asm__("lcall *(%%esi); cld\n\t"
29637+ __asm__("movw %w6, %%ds\n\t"
29638+ "lcall *%%ss:(%%esi); cld\n\t"
29639+ "push %%ss\n\t"
29640+ "pop %%ds\n\t"
29641 "jc 1f\n\t"
29642 "xor %%ah, %%ah\n"
29643 "1:"
29644@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
29645 : "1" (PCIBIOS_READ_CONFIG_WORD),
29646 "b" (bx),
29647 "D" ((long)reg),
29648- "S" (&pci_indirect));
29649+ "S" (&pci_indirect),
29650+ "r" (__PCIBIOS_DS));
29651 /*
29652 * Zero-extend the result beyond 16 bits, do not trust the
29653 * BIOS having done it:
29654@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
29655 *value &= 0xffff;
29656 break;
29657 case 4:
29658- __asm__("lcall *(%%esi); cld\n\t"
29659+ __asm__("movw %w6, %%ds\n\t"
29660+ "lcall *%%ss:(%%esi); cld\n\t"
29661+ "push %%ss\n\t"
29662+ "pop %%ds\n\t"
29663 "jc 1f\n\t"
29664 "xor %%ah, %%ah\n"
29665 "1:"
29666@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
29667 : "1" (PCIBIOS_READ_CONFIG_DWORD),
29668 "b" (bx),
29669 "D" ((long)reg),
29670- "S" (&pci_indirect));
29671+ "S" (&pci_indirect),
29672+ "r" (__PCIBIOS_DS));
29673 break;
29674 }
29675
29676@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
29677
29678 switch (len) {
29679 case 1:
29680- __asm__("lcall *(%%esi); cld\n\t"
29681+ __asm__("movw %w6, %%ds\n\t"
29682+ "lcall *%%ss:(%%esi); cld\n\t"
29683+ "push %%ss\n\t"
29684+ "pop %%ds\n\t"
29685 "jc 1f\n\t"
29686 "xor %%ah, %%ah\n"
29687 "1:"
29688@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
29689 "c" (value),
29690 "b" (bx),
29691 "D" ((long)reg),
29692- "S" (&pci_indirect));
29693+ "S" (&pci_indirect),
29694+ "r" (__PCIBIOS_DS));
29695 break;
29696 case 2:
29697- __asm__("lcall *(%%esi); cld\n\t"
29698+ __asm__("movw %w6, %%ds\n\t"
29699+ "lcall *%%ss:(%%esi); cld\n\t"
29700+ "push %%ss\n\t"
29701+ "pop %%ds\n\t"
29702 "jc 1f\n\t"
29703 "xor %%ah, %%ah\n"
29704 "1:"
29705@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
29706 "c" (value),
29707 "b" (bx),
29708 "D" ((long)reg),
29709- "S" (&pci_indirect));
29710+ "S" (&pci_indirect),
29711+ "r" (__PCIBIOS_DS));
29712 break;
29713 case 4:
29714- __asm__("lcall *(%%esi); cld\n\t"
29715+ __asm__("movw %w6, %%ds\n\t"
29716+ "lcall *%%ss:(%%esi); cld\n\t"
29717+ "push %%ss\n\t"
29718+ "pop %%ds\n\t"
29719 "jc 1f\n\t"
29720 "xor %%ah, %%ah\n"
29721 "1:"
29722@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
29723 "c" (value),
29724 "b" (bx),
29725 "D" ((long)reg),
29726- "S" (&pci_indirect));
29727+ "S" (&pci_indirect),
29728+ "r" (__PCIBIOS_DS));
29729 break;
29730 }
29731
29732@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
29733
29734 DBG("PCI: Fetching IRQ routing table... ");
29735 __asm__("push %%es\n\t"
29736+ "movw %w8, %%ds\n\t"
29737 "push %%ds\n\t"
29738 "pop %%es\n\t"
29739- "lcall *(%%esi); cld\n\t"
29740+ "lcall *%%ss:(%%esi); cld\n\t"
29741 "pop %%es\n\t"
29742+ "push %%ss\n\t"
29743+ "pop %%ds\n"
29744 "jc 1f\n\t"
29745 "xor %%ah, %%ah\n"
29746 "1:"
29747@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
29748 "1" (0),
29749 "D" ((long) &opt),
29750 "S" (&pci_indirect),
29751- "m" (opt)
29752+ "m" (opt),
29753+ "r" (__PCIBIOS_DS)
29754 : "memory");
29755 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
29756 if (ret & 0xff00)
29757@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
29758 {
29759 int ret;
29760
29761- __asm__("lcall *(%%esi); cld\n\t"
29762+ __asm__("movw %w5, %%ds\n\t"
29763+ "lcall *%%ss:(%%esi); cld\n\t"
29764+ "push %%ss\n\t"
29765+ "pop %%ds\n"
29766 "jc 1f\n\t"
29767 "xor %%ah, %%ah\n"
29768 "1:"
29769@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
29770 : "0" (PCIBIOS_SET_PCI_HW_INT),
29771 "b" ((dev->bus->number << 8) | dev->devfn),
29772 "c" ((irq << 8) | (pin + 10)),
29773- "S" (&pci_indirect));
29774+ "S" (&pci_indirect),
29775+ "r" (__PCIBIOS_DS));
29776 return !(ret & 0xff00);
29777 }
29778 EXPORT_SYMBOL(pcibios_set_irq_routing);
29779diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
29780index 40e4469..1ab536e 100644
29781--- a/arch/x86/platform/efi/efi_32.c
29782+++ b/arch/x86/platform/efi/efi_32.c
29783@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
29784 {
29785 struct desc_ptr gdt_descr;
29786
29787+#ifdef CONFIG_PAX_KERNEXEC
29788+ struct desc_struct d;
29789+#endif
29790+
29791 local_irq_save(efi_rt_eflags);
29792
29793 load_cr3(initial_page_table);
29794 __flush_tlb_all();
29795
29796+#ifdef CONFIG_PAX_KERNEXEC
29797+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
29798+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
29799+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
29800+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
29801+#endif
29802+
29803 gdt_descr.address = __pa(get_cpu_gdt_table(0));
29804 gdt_descr.size = GDT_SIZE - 1;
29805 load_gdt(&gdt_descr);
29806@@ -58,6 +69,14 @@ void efi_call_phys_epilog(void)
29807 {
29808 struct desc_ptr gdt_descr;
29809
29810+#ifdef CONFIG_PAX_KERNEXEC
29811+ struct desc_struct d;
29812+
29813+ memset(&d, 0, sizeof d);
29814+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
29815+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
29816+#endif
29817+
29818 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
29819 gdt_descr.size = GDT_SIZE - 1;
29820 load_gdt(&gdt_descr);
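
efi_32.c gets the analogous treatment for EFI runtime services: when KERNEXEC is active, the prolog installs temporary flat GDT_ENTRY_KERNEXEC_EFI_CS/DS descriptors, since the firmware expects flat segments while the kernel's own CS is rebased, and the epilog zeroes them again so the flat code segment exists only for the duration of the call. The stub below then long-jumps through those selectors instead of hand-computing a physical address in %edx.
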
29821diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
29822index fbe66e6..eae5e38 100644
29823--- a/arch/x86/platform/efi/efi_stub_32.S
29824+++ b/arch/x86/platform/efi/efi_stub_32.S
29825@@ -6,7 +6,9 @@
29826 */
29827
29828 #include <linux/linkage.h>
29829+#include <linux/init.h>
29830 #include <asm/page_types.h>
29831+#include <asm/segment.h>
29832
29833 /*
29834 * efi_call_phys(void *, ...) is a function with variable parameters.
29835@@ -20,7 +22,7 @@
29836 * service functions will comply with gcc calling convention, too.
29837 */
29838
29839-.text
29840+__INIT
29841 ENTRY(efi_call_phys)
29842 /*
29843 * 0. The function can only be called in Linux kernel. So CS has been
29844@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
29845 * The mapping of lower virtual memory has been created in prelog and
29846 * epilog.
29847 */
29848- movl $1f, %edx
29849- subl $__PAGE_OFFSET, %edx
29850- jmp *%edx
29851+#ifdef CONFIG_PAX_KERNEXEC
29852+ movl $(__KERNEXEC_EFI_DS), %edx
29853+ mov %edx, %ds
29854+ mov %edx, %es
29855+ mov %edx, %ss
29856+ addl $2f,(1f)
29857+ ljmp *(1f)
29858+
29859+__INITDATA
29860+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
29861+.previous
29862+
29863+2:
29864+ subl $2b,(1b)
29865+#else
29866+ jmp 1f-__PAGE_OFFSET
29867 1:
29868+#endif
29869
29870 /*
29871 * 2. Now on the top of stack is the return
29872@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
29873 * parameter 2, ..., param n. To make things easy, we save the return
29874 * address of efi_call_phys in a global variable.
29875 */
29876- popl %edx
29877- movl %edx, saved_return_addr
29878- /* get the function pointer into ECX*/
29879- popl %ecx
29880- movl %ecx, efi_rt_function_ptr
29881- movl $2f, %edx
29882- subl $__PAGE_OFFSET, %edx
29883- pushl %edx
29884+ popl (saved_return_addr)
29885+ popl (efi_rt_function_ptr)
29886
29887 /*
29888 * 3. Clear PG bit in %CR0.
29889@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
29890 /*
29891 * 5. Call the physical function.
29892 */
29893- jmp *%ecx
29894+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
29895
29896-2:
29897 /*
29898 * 6. After EFI runtime service returns, control will return to
29899 * following instruction. We'd better readjust stack pointer first.
29900@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
29901 movl %cr0, %edx
29902 orl $0x80000000, %edx
29903 movl %edx, %cr0
29904- jmp 1f
29905-1:
29906+
29907 /*
29908 * 8. Now restore the virtual mode from flat mode by
29909 * adding EIP with PAGE_OFFSET.
29910 */
29911- movl $1f, %edx
29912- jmp *%edx
29913+#ifdef CONFIG_PAX_KERNEXEC
29914+ movl $(__KERNEL_DS), %edx
29915+ mov %edx, %ds
29916+ mov %edx, %es
29917+ mov %edx, %ss
29918+ ljmp $(__KERNEL_CS),$1f
29919+#else
29920+ jmp 1f+__PAGE_OFFSET
29921+#endif
29922 1:
29923
29924 /*
29925 * 9. Balance the stack. And because EAX contain the return value,
29926 * we'd better not clobber it.
29927 */
29928- leal efi_rt_function_ptr, %edx
29929- movl (%edx), %ecx
29930- pushl %ecx
29931+ pushl (efi_rt_function_ptr)
29932
29933 /*
29934- * 10. Push the saved return address onto the stack and return.
29935+ * 10. Return to the saved return address.
29936 */
29937- leal saved_return_addr, %edx
29938- movl (%edx), %ecx
29939- pushl %ecx
29940- ret
29941+ jmpl *(saved_return_addr)
29942 ENDPROC(efi_call_phys)
29943 .previous
29944
29945-.data
29946+__INITDATA
29947 saved_return_addr:
29948 .long 0
29949 efi_rt_function_ptr:
29950diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
29951index 4c07cca..2c8427d 100644
29952--- a/arch/x86/platform/efi/efi_stub_64.S
29953+++ b/arch/x86/platform/efi/efi_stub_64.S
29954@@ -7,6 +7,7 @@
29955 */
29956
29957 #include <linux/linkage.h>
29958+#include <asm/alternative-asm.h>
29959
29960 #define SAVE_XMM \
29961 mov %rsp, %rax; \
29962@@ -40,6 +41,7 @@ ENTRY(efi_call0)
29963 call *%rdi
29964 addq $32, %rsp
29965 RESTORE_XMM
29966+ pax_force_retaddr 0, 1
29967 ret
29968 ENDPROC(efi_call0)
29969
29970@@ -50,6 +52,7 @@ ENTRY(efi_call1)
29971 call *%rdi
29972 addq $32, %rsp
29973 RESTORE_XMM
29974+ pax_force_retaddr 0, 1
29975 ret
29976 ENDPROC(efi_call1)
29977
29978@@ -60,6 +63,7 @@ ENTRY(efi_call2)
29979 call *%rdi
29980 addq $32, %rsp
29981 RESTORE_XMM
29982+ pax_force_retaddr 0, 1
29983 ret
29984 ENDPROC(efi_call2)
29985
29986@@ -71,6 +75,7 @@ ENTRY(efi_call3)
29987 call *%rdi
29988 addq $32, %rsp
29989 RESTORE_XMM
29990+ pax_force_retaddr 0, 1
29991 ret
29992 ENDPROC(efi_call3)
29993
29994@@ -83,6 +88,7 @@ ENTRY(efi_call4)
29995 call *%rdi
29996 addq $32, %rsp
29997 RESTORE_XMM
29998+ pax_force_retaddr 0, 1
29999 ret
30000 ENDPROC(efi_call4)
30001
30002@@ -96,6 +102,7 @@ ENTRY(efi_call5)
30003 call *%rdi
30004 addq $48, %rsp
30005 RESTORE_XMM
30006+ pax_force_retaddr 0, 1
30007 ret
30008 ENDPROC(efi_call5)
30009
30010@@ -112,5 +119,6 @@ ENTRY(efi_call6)
30011 call *%rdi
30012 addq $48, %rsp
30013 RESTORE_XMM
30014+ pax_force_retaddr 0, 1
30015 ret
30016 ENDPROC(efi_call6)
30017diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
30018index e31bcd8..f12dc46 100644
30019--- a/arch/x86/platform/mrst/mrst.c
30020+++ b/arch/x86/platform/mrst/mrst.c
30021@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
30022 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
30023 int sfi_mrtc_num;
30024
30025-static void mrst_power_off(void)
30026+static __noreturn void mrst_power_off(void)
30027 {
30028+ BUG();
30029 }
30030
30031-static void mrst_reboot(void)
30032+static __noreturn void mrst_reboot(void)
30033 {
30034 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
30035+ BUG();
30036 }
30037
30038 /* parse all the mtimer info to a static mtimer array */
30039diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
30040index d6ee929..3637cb5 100644
30041--- a/arch/x86/platform/olpc/olpc_dt.c
30042+++ b/arch/x86/platform/olpc/olpc_dt.c
30043@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
30044 return res;
30045 }
30046
30047-static struct of_pdt_ops prom_olpc_ops __initdata = {
30048+static struct of_pdt_ops prom_olpc_ops __initconst = {
30049 .nextprop = olpc_dt_nextprop,
30050 .getproplen = olpc_dt_getproplen,
30051 .getproperty = olpc_dt_getproperty,
30052diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
30053index 120cee1..b2db75a 100644
30054--- a/arch/x86/power/cpu.c
30055+++ b/arch/x86/power/cpu.c
30056@@ -133,7 +133,7 @@ static void do_fpu_end(void)
30057 static void fix_processor_context(void)
30058 {
30059 int cpu = smp_processor_id();
30060- struct tss_struct *t = &per_cpu(init_tss, cpu);
30061+ struct tss_struct *t = init_tss + cpu;
30062
30063 set_tss_desc(cpu, t); /*
30064 * This just modifies memory; should not be
30065@@ -143,8 +143,6 @@ static void fix_processor_context(void)
30066 */
30067
30068 #ifdef CONFIG_X86_64
30069- get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
30070-
30071 syscall_init(); /* This sets MSR_*STAR and related */
30072 #endif
30073 load_TR_desc(); /* This does ltr */
30074diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
30075index cbca565..bae7133 100644
30076--- a/arch/x86/realmode/init.c
30077+++ b/arch/x86/realmode/init.c
30078@@ -62,7 +62,13 @@ void __init setup_real_mode(void)
30079 __va(real_mode_header->trampoline_header);
30080
30081 #ifdef CONFIG_X86_32
30082- trampoline_header->start = __pa(startup_32_smp);
30083+ trampoline_header->start = __pa(ktla_ktva(startup_32_smp));
30084+
30085+#ifdef CONFIG_PAX_KERNEXEC
30086+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
30087+#endif
30088+
30089+ trampoline_header->boot_cs = __BOOT_CS;
30090 trampoline_header->gdt_limit = __BOOT_DS + 7;
30091 trampoline_header->gdt_base = __pa(boot_gdt);
30092 #else
30093diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
30094index 8869287..d577672 100644
30095--- a/arch/x86/realmode/rm/Makefile
30096+++ b/arch/x86/realmode/rm/Makefile
30097@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
30098 $(call cc-option, -fno-unit-at-a-time)) \
30099 $(call cc-option, -fno-stack-protector) \
30100 $(call cc-option, -mpreferred-stack-boundary=2)
30101+ifdef CONSTIFY_PLUGIN
30102+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
30103+endif
30104 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
30105 GCOV_PROFILE := n
30106diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
30107index a28221d..93c40f1 100644
30108--- a/arch/x86/realmode/rm/header.S
30109+++ b/arch/x86/realmode/rm/header.S
30110@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
30111 #endif
30112 /* APM/BIOS reboot */
30113 .long pa_machine_real_restart_asm
30114-#ifdef CONFIG_X86_64
30115+#ifdef CONFIG_X86_32
30116+ .long __KERNEL_CS
30117+#else
30118 .long __KERNEL32_CS
30119 #endif
30120 END(real_mode_header)
30121diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
30122index c1b2791..f9e31c7 100644
30123--- a/arch/x86/realmode/rm/trampoline_32.S
30124+++ b/arch/x86/realmode/rm/trampoline_32.S
30125@@ -25,6 +25,12 @@
30126 #include <asm/page_types.h>
30127 #include "realmode.h"
30128
30129+#ifdef CONFIG_PAX_KERNEXEC
30130+#define ta(X) (X)
30131+#else
30132+#define ta(X) (pa_ ## X)
30133+#endif
30134+
30135 .text
30136 .code16
30137
30138@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
30139
30140 cli # We should be safe anyway
30141
30142- movl tr_start, %eax # where we need to go
30143-
30144 movl $0xA5A5A5A5, trampoline_status
30145 # write marker for master knows we're running
30146
30147@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
30148 movw $1, %dx # protected mode (PE) bit
30149 lmsw %dx # into protected mode
30150
30151- ljmpl $__BOOT_CS, $pa_startup_32
30152+ ljmpl *(trampoline_header)
30153
30154 .section ".text32","ax"
30155 .code32
30156@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
30157 .balign 8
30158 GLOBAL(trampoline_header)
30159 tr_start: .space 4
30160- tr_gdt_pad: .space 2
30161+ tr_boot_cs: .space 2
30162 tr_gdt: .space 6
30163 END(trampoline_header)
30164
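
The realmode changes follow from the same rebased boot segment: trampoline_32.S used to hard-code ljmpl $__BOOT_CS, $pa_startup_32, which assumes a flat code segment. The trampoline header's padding word becomes tr_boot_cs, setup_real_mode() fills in both the selector and a ktla_ktva()-adjusted start address, and the trampoline performs a single indirect ljmpl *(trampoline_header) through the pair. The ta() macro picks absolute or pa_-prefixed symbols to match.
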
30165diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
30166index bb360dc..3e5945f 100644
30167--- a/arch/x86/realmode/rm/trampoline_64.S
30168+++ b/arch/x86/realmode/rm/trampoline_64.S
30169@@ -107,7 +107,7 @@ ENTRY(startup_32)
30170 wrmsr
30171
30172 # Enable paging and in turn activate Long Mode
30173- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
30174+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
30175 movl %eax, %cr0
30176
30177 /*
30178diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
30179index 79d67bd..c7e1b90 100644
30180--- a/arch/x86/tools/relocs.c
30181+++ b/arch/x86/tools/relocs.c
30182@@ -12,10 +12,13 @@
30183 #include <regex.h>
30184 #include <tools/le_byteshift.h>
30185
30186+#include "../../../include/generated/autoconf.h"
30187+
30188 static void die(char *fmt, ...);
30189
30190 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
30191 static Elf32_Ehdr ehdr;
30192+static Elf32_Phdr *phdr;
30193 static unsigned long reloc_count, reloc_idx;
30194 static unsigned long *relocs;
30195 static unsigned long reloc16_count, reloc16_idx;
30196@@ -330,9 +333,39 @@ static void read_ehdr(FILE *fp)
30197 }
30198 }
30199
30200+static void read_phdrs(FILE *fp)
30201+{
30202+ unsigned int i;
30203+
30204+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
30205+ if (!phdr) {
30206+ die("Unable to allocate %d program headers\n",
30207+ ehdr.e_phnum);
30208+ }
30209+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
30210+ die("Seek to %d failed: %s\n",
30211+ ehdr.e_phoff, strerror(errno));
30212+ }
30213+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
30214+ die("Cannot read ELF program headers: %s\n",
30215+ strerror(errno));
30216+ }
30217+ for(i = 0; i < ehdr.e_phnum; i++) {
30218+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
30219+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
30220+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
30221+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
30222+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
30223+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
30224+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
30225+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
30226+ }
30227+
30228+}
30229+
30230 static void read_shdrs(FILE *fp)
30231 {
30232- int i;
30233+ unsigned int i;
30234 Elf32_Shdr shdr;
30235
30236 secs = calloc(ehdr.e_shnum, sizeof(struct section));
30237@@ -367,7 +400,7 @@ static void read_shdrs(FILE *fp)
30238
30239 static void read_strtabs(FILE *fp)
30240 {
30241- int i;
30242+ unsigned int i;
30243 for (i = 0; i < ehdr.e_shnum; i++) {
30244 struct section *sec = &secs[i];
30245 if (sec->shdr.sh_type != SHT_STRTAB) {
30246@@ -392,7 +425,7 @@ static void read_strtabs(FILE *fp)
30247
30248 static void read_symtabs(FILE *fp)
30249 {
30250- int i,j;
30251+ unsigned int i,j;
30252 for (i = 0; i < ehdr.e_shnum; i++) {
30253 struct section *sec = &secs[i];
30254 if (sec->shdr.sh_type != SHT_SYMTAB) {
30255@@ -423,9 +456,11 @@ static void read_symtabs(FILE *fp)
30256 }
30257
30258
30259-static void read_relocs(FILE *fp)
30260+static void read_relocs(FILE *fp, int use_real_mode)
30261 {
30262- int i,j;
30263+ unsigned int i,j;
30264+ uint32_t base;
30265+
30266 for (i = 0; i < ehdr.e_shnum; i++) {
30267 struct section *sec = &secs[i];
30268 if (sec->shdr.sh_type != SHT_REL) {
30269@@ -445,9 +480,22 @@ static void read_relocs(FILE *fp)
30270 die("Cannot read symbol table: %s\n",
30271 strerror(errno));
30272 }
30273+ base = 0;
30274+
30275+#ifdef CONFIG_X86_32
30276+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
30277+ if (phdr[j].p_type != PT_LOAD )
30278+ continue;
30279+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
30280+ continue;
30281+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
30282+ break;
30283+ }
30284+#endif
30285+
30286 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
30287 Elf32_Rel *rel = &sec->reltab[j];
30288- rel->r_offset = elf32_to_cpu(rel->r_offset);
30289+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
30290 rel->r_info = elf32_to_cpu(rel->r_info);
30291 }
30292 }
30293@@ -456,13 +504,13 @@ static void read_relocs(FILE *fp)
30294
30295 static void print_absolute_symbols(void)
30296 {
30297- int i;
30298+ unsigned int i;
30299 printf("Absolute symbols\n");
30300 printf(" Num: Value Size Type Bind Visibility Name\n");
30301 for (i = 0; i < ehdr.e_shnum; i++) {
30302 struct section *sec = &secs[i];
30303 char *sym_strtab;
30304- int j;
30305+ unsigned int j;
30306
30307 if (sec->shdr.sh_type != SHT_SYMTAB) {
30308 continue;
30309@@ -489,14 +537,14 @@ static void print_absolute_symbols(void)
30310
30311 static void print_absolute_relocs(void)
30312 {
30313- int i, printed = 0;
30314+ unsigned int i, printed = 0;
30315
30316 for (i = 0; i < ehdr.e_shnum; i++) {
30317 struct section *sec = &secs[i];
30318 struct section *sec_applies, *sec_symtab;
30319 char *sym_strtab;
30320 Elf32_Sym *sh_symtab;
30321- int j;
30322+ unsigned int j;
30323 if (sec->shdr.sh_type != SHT_REL) {
30324 continue;
30325 }
30326@@ -558,13 +606,13 @@ static void print_absolute_relocs(void)
30327 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
30328 int use_real_mode)
30329 {
30330- int i;
30331+ unsigned int i;
30332 /* Walk through the relocations */
30333 for (i = 0; i < ehdr.e_shnum; i++) {
30334 char *sym_strtab;
30335 Elf32_Sym *sh_symtab;
30336 struct section *sec_applies, *sec_symtab;
30337- int j;
30338+ unsigned int j;
30339 struct section *sec = &secs[i];
30340
30341 if (sec->shdr.sh_type != SHT_REL) {
30342@@ -588,6 +636,24 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym),
30343 sym = &sh_symtab[ELF32_R_SYM(rel->r_info)];
30344 r_type = ELF32_R_TYPE(rel->r_info);
30345
30346+ if (!use_real_mode) {
30347+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
30348+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
30349+ continue;
30350+
30351+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
30352+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
30353+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
30354+ continue;
30355+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
30356+ continue;
30357+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
30358+ continue;
30359+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
30360+ continue;
30361+#endif
30362+ }
30363+
30364 shn_abs = sym->st_shndx == SHN_ABS;
30365
30366 switch (r_type) {
30367@@ -681,7 +747,7 @@ static int write32(unsigned int v, FILE *f)
30368
30369 static void emit_relocs(int as_text, int use_real_mode)
30370 {
30371- int i;
30372+ unsigned int i;
30373 /* Count how many relocations I have and allocate space for them. */
30374 reloc_count = 0;
30375 walk_relocs(count_reloc, use_real_mode);
30376@@ -808,10 +874,11 @@ int main(int argc, char **argv)
30377 fname, strerror(errno));
30378 }
30379 read_ehdr(fp);
30380+ read_phdrs(fp);
30381 read_shdrs(fp);
30382 read_strtabs(fp);
30383 read_symtabs(fp);
30384- read_relocs(fp);
30385+ read_relocs(fp, use_real_mode);
30386 if (show_absolute_syms) {
30387 print_absolute_symbols();
30388 goto out;
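
relocs.c, the host tool that extracts relocations for the decompressor, is taught about the new layout: it now reads the ELF program headers and, on 32-bit kernel images, rebases every relocation offset by CONFIG_PAGE_OFFSET + p_paddr - p_vaddr so offsets name the kernel's virtual addresses. Per-CPU symbols are skipped because they are segment-relative indices rather than addresses, and with KERNEXEC on 32-bit the text sections are skipped as well, since code is relocated implicitly through the base of __KERNEL_CS. The loop indices switch to unsigned int to match the unsigned ELF header fields.
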
30389diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
30390index fd14be1..e3c79c0 100644
30391--- a/arch/x86/vdso/Makefile
30392+++ b/arch/x86/vdso/Makefile
30393@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
30394 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
30395 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
30396
30397-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
30398+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
30399 GCOV_PROFILE := n
30400
30401 #
30402diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
30403index 0faad64..39ef157 100644
30404--- a/arch/x86/vdso/vdso32-setup.c
30405+++ b/arch/x86/vdso/vdso32-setup.c
30406@@ -25,6 +25,7 @@
30407 #include <asm/tlbflush.h>
30408 #include <asm/vdso.h>
30409 #include <asm/proto.h>
30410+#include <asm/mman.h>
30411
30412 enum {
30413 VDSO_DISABLED = 0,
30414@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
30415 void enable_sep_cpu(void)
30416 {
30417 int cpu = get_cpu();
30418- struct tss_struct *tss = &per_cpu(init_tss, cpu);
30419+ struct tss_struct *tss = init_tss + cpu;
30420
30421 if (!boot_cpu_has(X86_FEATURE_SEP)) {
30422 put_cpu();
30423@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
30424 gate_vma.vm_start = FIXADDR_USER_START;
30425 gate_vma.vm_end = FIXADDR_USER_END;
30426 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
30427- gate_vma.vm_page_prot = __P101;
30428+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
30429
30430 return 0;
30431 }
30432@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
30433 if (compat)
30434 addr = VDSO_HIGH_BASE;
30435 else {
30436- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
30437+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
30438 if (IS_ERR_VALUE(addr)) {
30439 ret = addr;
30440 goto up_fail;
30441 }
30442 }
30443
30444- current->mm->context.vdso = (void *)addr;
30445+ current->mm->context.vdso = addr;
30446
30447 if (compat_uses_vma || !compat) {
30448 /*
30449@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
30450 }
30451
30452 current_thread_info()->sysenter_return =
30453- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
30454+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
30455
30456 up_fail:
30457 if (ret)
30458- current->mm->context.vdso = NULL;
30459+ current->mm->context.vdso = 0;
30460
30461 up_write(&mm->mmap_sem);
30462
30463@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
30464
30465 const char *arch_vma_name(struct vm_area_struct *vma)
30466 {
30467- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
30468+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
30469 return "[vdso]";
30470+
30471+#ifdef CONFIG_PAX_SEGMEXEC
30472+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
30473+ return "[vdso]";
30474+#endif
30475+
30476 return NULL;
30477 }
30478
30479@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
30480 * Check to see if the corresponding task was created in compat vdso
30481 * mode.
30482 */
30483- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
30484+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
30485 return &gate_vma;
30486 return NULL;
30487 }
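
vdso32-setup.c stores mm->context.vdso as a plain address instead of a pointer, asks get_unmapped_area() for the mapping with a MAP_EXECUTABLE hint, and extends arch_vma_name() for SEGMEXEC: under that option executable mappings are mirrored into the other half of the split address space, so a vma whose vm_mirror covers the vdso must also be reported as [vdso].
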
30488diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
30489index 431e875..cbb23f3 100644
30490--- a/arch/x86/vdso/vma.c
30491+++ b/arch/x86/vdso/vma.c
30492@@ -16,8 +16,6 @@
30493 #include <asm/vdso.h>
30494 #include <asm/page.h>
30495
30496-unsigned int __read_mostly vdso_enabled = 1;
30497-
30498 extern char vdso_start[], vdso_end[];
30499 extern unsigned short vdso_sync_cpuid;
30500
30501@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
30502 * unaligned here as a result of stack start randomization.
30503 */
30504 addr = PAGE_ALIGN(addr);
30505- addr = align_vdso_addr(addr);
30506
30507 return addr;
30508 }
30509@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
30510 unsigned size)
30511 {
30512 struct mm_struct *mm = current->mm;
30513- unsigned long addr;
30514+ unsigned long addr = 0;
30515 int ret;
30516
30517- if (!vdso_enabled)
30518- return 0;
30519-
30520 down_write(&mm->mmap_sem);
30521+
30522+#ifdef CONFIG_PAX_RANDMMAP
30523+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
30524+#endif
30525+
30526 addr = vdso_addr(mm->start_stack, size);
30527+ addr = align_vdso_addr(addr);
30528 addr = get_unmapped_area(NULL, addr, size, 0, 0);
30529 if (IS_ERR_VALUE(addr)) {
30530 ret = addr;
30531 goto up_fail;
30532 }
30533
30534- current->mm->context.vdso = (void *)addr;
30535+ mm->context.vdso = addr;
30536
30537 ret = install_special_mapping(mm, addr, size,
30538 VM_READ|VM_EXEC|
30539 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
30540 pages);
30541- if (ret) {
30542- current->mm->context.vdso = NULL;
30543- goto up_fail;
30544- }
30545+ if (ret)
30546+ mm->context.vdso = 0;
30547
30548 up_fail:
30549 up_write(&mm->mmap_sem);
30550@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
30551 vdsox32_size);
30552 }
30553 #endif
30554-
30555-static __init int vdso_setup(char *s)
30556-{
30557- vdso_enabled = simple_strtoul(s, NULL, 0);
30558- return 0;
30559-}
30560-__setup("vdso=", vdso_setup);
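
vma.c removes the vdso_enabled knob and its vdso= parameter outright, so the 64-bit vdso is always mapped, and when PAX_RANDMMAP is active it skips the deterministic vdso_addr() hint; with addr left at 0, get_unmapped_area() places the vdso at the randomized mmap base instead. On failure, context.vdso is reset to 0 to match the new representation.
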
30561diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
30562index e014092..c76ab69 100644
30563--- a/arch/x86/xen/enlighten.c
30564+++ b/arch/x86/xen/enlighten.c
30565@@ -99,8 +99,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
30566
30567 struct shared_info xen_dummy_shared_info;
30568
30569-void *xen_initial_gdt;
30570-
30571 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
30572 __read_mostly int xen_have_vector_callback;
30573 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
30574@@ -495,8 +493,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
30575 {
30576 unsigned long va = dtr->address;
30577 unsigned int size = dtr->size + 1;
30578- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
30579- unsigned long frames[pages];
30580+ unsigned long frames[65536 / PAGE_SIZE];
30581 int f;
30582
30583 /*
30584@@ -544,8 +541,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
30585 {
30586 unsigned long va = dtr->address;
30587 unsigned int size = dtr->size + 1;
30588- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
30589- unsigned long frames[pages];
30590+ unsigned long frames[65536 / PAGE_SIZE];
30591 int f;
30592
30593 /*
30594@@ -938,7 +934,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
30595 return 0;
30596 }
30597
30598-static void set_xen_basic_apic_ops(void)
30599+static void __init set_xen_basic_apic_ops(void)
30600 {
30601 apic->read = xen_apic_read;
30602 apic->write = xen_apic_write;
30603@@ -1244,30 +1240,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
30604 #endif
30605 };
30606
30607-static void xen_reboot(int reason)
30608+static __noreturn void xen_reboot(int reason)
30609 {
30610 struct sched_shutdown r = { .reason = reason };
30611
30612- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
30613- BUG();
30614+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
30615+ BUG();
30616 }
30617
30618-static void xen_restart(char *msg)
30619+static __noreturn void xen_restart(char *msg)
30620 {
30621 xen_reboot(SHUTDOWN_reboot);
30622 }
30623
30624-static void xen_emergency_restart(void)
30625+static __noreturn void xen_emergency_restart(void)
30626 {
30627 xen_reboot(SHUTDOWN_reboot);
30628 }
30629
30630-static void xen_machine_halt(void)
30631+static __noreturn void xen_machine_halt(void)
30632 {
30633 xen_reboot(SHUTDOWN_poweroff);
30634 }
30635
30636-static void xen_machine_power_off(void)
30637+static __noreturn void xen_machine_power_off(void)
30638 {
30639 if (pm_power_off)
30640 pm_power_off();
30641@@ -1369,7 +1365,17 @@ asmlinkage void __init xen_start_kernel(void)
30642 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
30643
30644 /* Work out if we support NX */
30645- x86_configure_nx();
30646+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
30647+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
30648+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
30649+ unsigned l, h;
30650+
30651+ __supported_pte_mask |= _PAGE_NX;
30652+ rdmsr(MSR_EFER, l, h);
30653+ l |= EFER_NX;
30654+ wrmsr(MSR_EFER, l, h);
30655+ }
30656+#endif
30657
30658 xen_setup_features();
30659
30660@@ -1398,14 +1404,7 @@ asmlinkage void __init xen_start_kernel(void)
30661 pv_mmu_ops.ptep_modify_prot_commit = xen_ptep_modify_prot_commit;
30662 }
30663
30664- machine_ops = xen_machine_ops;
30665-
30666- /*
30667- * The only reliable way to retain the initial address of the
30668- * percpu gdt_page is to remember it here, so we can go and
30669- * mark it RW later, when the initial percpu area is freed.
30670- */
30671- xen_initial_gdt = &per_cpu(gdt_page, 0);
30672+ memcpy((void *)&machine_ops, &xen_machine_ops, sizeof machine_ops);
30673
30674 xen_smp_init();
30675
30676@@ -1590,7 +1589,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
30677 return NOTIFY_OK;
30678 }
30679
30680-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
30681+static struct notifier_block xen_hvm_cpu_notifier = {
30682 .notifier_call = xen_hvm_cpu_notify,
30683 };
30684
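
Two details in enlighten.c are worth spelling out: the on-stack variable-length frames[] array becomes a fixed worst-case one, which is safe because a GDT limit is a 16-bit field, so no GDT exceeds 65536 bytes (16 array entries with 4 KiB pages); and xen_start_kernel() now probes and enables NX itself via cpuid and MSR_EFER rather than calling x86_configure_nx(), presumably so EFER.NX is set this early even in the PAX_PAGEEXEC configuration. The power-off and reboot hooks are annotated __noreturn, with xen_reboot() now BUGging unconditionally after the hypercall.
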
30685diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
30686index 01de35c..0bda07b 100644
30687--- a/arch/x86/xen/mmu.c
30688+++ b/arch/x86/xen/mmu.c
30689@@ -1881,6 +1881,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
30690 /* L3_k[510] -> level2_kernel_pgt
30691 * L3_i[511] -> level2_fixmap_pgt */
30692 convert_pfn_mfn(level3_kernel_pgt);
30693+ convert_pfn_mfn(level3_vmalloc_start_pgt);
30694+ convert_pfn_mfn(level3_vmalloc_end_pgt);
30695+ convert_pfn_mfn(level3_vmemmap_pgt);
30696
30697 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
30698 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
30699@@ -1910,8 +1913,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
30700 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
30701 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
30702 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
30703+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
30704+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
30705+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
30706 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
30707 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
30708+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
30709 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
30710 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
30711
30712@@ -2097,6 +2104,7 @@ static void __init xen_post_allocator_init(void)
30713 pv_mmu_ops.set_pud = xen_set_pud;
30714 #if PAGETABLE_LEVELS == 4
30715 pv_mmu_ops.set_pgd = xen_set_pgd;
30716+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
30717 #endif
30718
30719 /* This will work as long as patching hasn't happened yet
30720@@ -2178,6 +2186,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
30721 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
30722 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
30723 .set_pgd = xen_set_pgd_hyper,
30724+ .set_pgd_batched = xen_set_pgd_hyper,
30725
30726 .alloc_pud = xen_alloc_pmd_init,
30727 .release_pud = xen_release_pmd_init,
30728diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
30729index 34bc4ce..c34aa24 100644
30730--- a/arch/x86/xen/smp.c
30731+++ b/arch/x86/xen/smp.c
30732@@ -229,11 +229,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
30733 {
30734 BUG_ON(smp_processor_id() != 0);
30735 native_smp_prepare_boot_cpu();
30736-
30737- /* We've switched to the "real" per-cpu gdt, so make sure the
30738- old memory can be recycled */
30739- make_lowmem_page_readwrite(xen_initial_gdt);
30740-
30741 xen_filter_cpu_maps();
30742 xen_setup_vcpu_info_placement();
30743 }
30744@@ -300,12 +295,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
30745 gdt = get_cpu_gdt_table(cpu);
30746
30747 ctxt->flags = VGCF_IN_KERNEL;
30748- ctxt->user_regs.ds = __USER_DS;
30749- ctxt->user_regs.es = __USER_DS;
30750+ ctxt->user_regs.ds = __KERNEL_DS;
30751+ ctxt->user_regs.es = __KERNEL_DS;
30752 ctxt->user_regs.ss = __KERNEL_DS;
30753 #ifdef CONFIG_X86_32
30754 ctxt->user_regs.fs = __KERNEL_PERCPU;
30755- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
30756+ savesegment(gs, ctxt->user_regs.gs);
30757 #else
30758 ctxt->gs_base_kernel = per_cpu_offset(cpu);
30759 #endif
30760@@ -355,13 +350,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
30761 int rc;
30762
30763 per_cpu(current_task, cpu) = idle;
30764+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
30765 #ifdef CONFIG_X86_32
30766 irq_ctx_init(cpu);
30767 #else
30768 clear_tsk_thread_flag(idle, TIF_FORK);
30769- per_cpu(kernel_stack, cpu) =
30770- (unsigned long)task_stack_page(idle) -
30771- KERNEL_STACK_OFFSET + THREAD_SIZE;
30772+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
30773 #endif
30774 xen_setup_runstate_info(cpu);
30775 xen_setup_timer(cpu);
30776@@ -630,7 +624,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
30777
30778 void __init xen_smp_init(void)
30779 {
30780- smp_ops = xen_smp_ops;
30781+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
30782 xen_fill_possible_map();
30783 xen_init_spinlocks();
30784 }
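
Both enlighten.c and smp.c above trade a plain structure assignment (machine_ops = xen_machine_ops; smp_ops = xen_smp_ops;) for a memcpy() through a cast. Under the constify plugin these ops tables are const-qualified, so direct assignment no longer compiles; a byte copy made once during early boot, before the region is write-protected, is the sanctioned escape hatch. A minimal sketch, assuming a constified ops structure:

    #include <linux/string.h>

    struct ops_sketch {
            void (*restart)(char *cmd);
            void (*halt)(void);
    };

    /* read-only after init under the assumed PaX/constify setup */
    static const struct ops_sketch machine_ops_sketch;

    static void __init install_ops_sketch(const struct ops_sketch *src)
    {
            /* assignment to a const object is rejected by the compiler;
             * copy the bytes while the section is still writable */
            memcpy((void *)&machine_ops_sketch, src, sizeof machine_ops_sketch);
    }
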
30785diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
30786index 33ca6e4..0ded929 100644
30787--- a/arch/x86/xen/xen-asm_32.S
30788+++ b/arch/x86/xen/xen-asm_32.S
30789@@ -84,14 +84,14 @@ ENTRY(xen_iret)
30790 ESP_OFFSET=4 # bytes pushed onto stack
30791
30792 /*
30793- * Store vcpu_info pointer for easy access. Do it this way to
30794- * avoid having to reload %fs
30795+ * Store vcpu_info pointer for easy access.
30796 */
30797 #ifdef CONFIG_SMP
30798- GET_THREAD_INFO(%eax)
30799- movl %ss:TI_cpu(%eax), %eax
30800- movl %ss:__per_cpu_offset(,%eax,4), %eax
30801- mov %ss:xen_vcpu(%eax), %eax
30802+ push %fs
30803+ mov $(__KERNEL_PERCPU), %eax
30804+ mov %eax, %fs
30805+ mov PER_CPU_VAR(xen_vcpu), %eax
30806+ pop %fs
30807 #else
30808 movl %ss:xen_vcpu, %eax
30809 #endif
30810diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
30811index 7faed58..ba4427c 100644
30812--- a/arch/x86/xen/xen-head.S
30813+++ b/arch/x86/xen/xen-head.S
30814@@ -19,6 +19,17 @@ ENTRY(startup_xen)
30815 #ifdef CONFIG_X86_32
30816 mov %esi,xen_start_info
30817 mov $init_thread_union+THREAD_SIZE,%esp
30818+#ifdef CONFIG_SMP
30819+ movl $cpu_gdt_table,%edi
30820+ movl $__per_cpu_load,%eax
30821+ movw %ax,__KERNEL_PERCPU + 2(%edi)
30822+ rorl $16,%eax
30823+ movb %al,__KERNEL_PERCPU + 4(%edi)
30824+ movb %ah,__KERNEL_PERCPU + 7(%edi)
30825+ movl $__per_cpu_end - 1,%eax
30826+ subl $__per_cpu_start,%eax
30827+ movw %ax,__KERNEL_PERCPU + 0(%edi)
30828+#endif
30829 #else
30830 mov %rsi,xen_start_info
30831 mov $init_thread_union+THREAD_SIZE,%rsp
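
The startup_xen addition programs the __KERNEL_PERCPU GDT entry by hand before any C code runs, so %fs-relative per-cpu references (used, for instance, by the reworked xen_iret above) resolve from the moment the guest starts. The assembly packs a base and limit into the legacy descriptor layout; the equivalent packing in C, for reference (the helper name is invented, the byte layout is architectural):

    #include <stdint.h>

    /* base[15:0] -> bytes 2-3, base[23:16] -> byte 4, base[31:24] -> byte 7;
     * limit[15:0] -> bytes 0-1. limit[19:16] in byte 6 is left alone,
     * exactly as the asm above leaves it. */
    static void set_desc_sketch(uint8_t *desc, uint32_t base, uint32_t limit)
    {
            desc[2] = base & 0xff;
            desc[3] = (base >> 8) & 0xff;
            desc[4] = (base >> 16) & 0xff;
            desc[7] = (base >> 24) & 0xff;
            desc[0] = limit & 0xff;
            desc[1] = (limit >> 8) & 0xff;
    }
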
30832diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
30833index a95b417..b6dbd0b 100644
30834--- a/arch/x86/xen/xen-ops.h
30835+++ b/arch/x86/xen/xen-ops.h
30836@@ -10,8 +10,6 @@
30837 extern const char xen_hypervisor_callback[];
30838 extern const char xen_failsafe_callback[];
30839
30840-extern void *xen_initial_gdt;
30841-
30842 struct trap_info;
30843 void xen_copy_trap_info(struct trap_info *traps);
30844
30845diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
30846index 525bd3d..ef888b1 100644
30847--- a/arch/xtensa/variants/dc232b/include/variant/core.h
30848+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
30849@@ -119,9 +119,9 @@
30850 ----------------------------------------------------------------------*/
30851
30852 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
30853-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
30854 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
30855 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
30856+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
30857
30858 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
30859 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
30860diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
30861index 2f33760..835e50a 100644
30862--- a/arch/xtensa/variants/fsf/include/variant/core.h
30863+++ b/arch/xtensa/variants/fsf/include/variant/core.h
30864@@ -11,6 +11,7 @@
30865 #ifndef _XTENSA_CORE_H
30866 #define _XTENSA_CORE_H
30867
30868+#include <linux/const.h>
30869
30870 /****************************************************************************
30871 Parameters Useful for Any Code, USER or PRIVILEGED
30872@@ -112,9 +113,9 @@
30873 ----------------------------------------------------------------------*/
30874
30875 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
30876-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
30877 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
30878 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
30879+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
30880
30881 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
30882 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
30883diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
30884index af00795..2bb8105 100644
30885--- a/arch/xtensa/variants/s6000/include/variant/core.h
30886+++ b/arch/xtensa/variants/s6000/include/variant/core.h
30887@@ -11,6 +11,7 @@
30888 #ifndef _XTENSA_CORE_CONFIGURATION_H
30889 #define _XTENSA_CORE_CONFIGURATION_H
30890
30891+#include <linux/const.h>
30892
30893 /****************************************************************************
30894 Parameters Useful for Any Code, USER or PRIVILEGED
30895@@ -118,9 +119,9 @@
30896 ----------------------------------------------------------------------*/
30897
30898 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
30899-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
30900 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
30901 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
30902+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
30903
30904 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
30905 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
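
All three xtensa variant headers get the same treatment: the D-cache line size is derived from its log2 width instead of being stated independently, so the two constants can no longer drift apart, and _AC() from <linux/const.h> (hence the new include) makes the result usable from both C and assembly. The pattern in isolation:

    #include <linux/const.h>

    /* _AC(1,UL) expands to 1UL in C but to a bare 1 under __ASSEMBLY__ */
    #define DCACHE_LINEWIDTH 5                               /* log2(bytes) */
    #define DCACHE_LINESIZE  (_AC(1,UL) << DCACHE_LINEWIDTH) /* 32 bytes   */
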
30906diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
30907index 58916af..eb9dbcf6 100644
30908--- a/block/blk-iopoll.c
30909+++ b/block/blk-iopoll.c
30910@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
30911 }
30912 EXPORT_SYMBOL(blk_iopoll_complete);
30913
30914-static void blk_iopoll_softirq(struct softirq_action *h)
30915+static void blk_iopoll_softirq(void)
30916 {
30917 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
30918 int rearm = 0, budget = blk_iopoll_budget;
30919@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
30920 return NOTIFY_OK;
30921 }
30922
30923-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
30924+static struct notifier_block blk_iopoll_cpu_notifier = {
30925 .notifier_call = blk_iopoll_cpu_notify,
30926 };
30927
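
blk-iopoll.c (and blk-softirq.c below) drop the unused struct softirq_action * parameter from their softirq handlers, matching a handler-type change made elsewhere in this patch set (an assumption from this excerpt); with no dead parameter, the handlers fit a plain void (*)(void) slot in a constifiable softirq table. The shape of the change:

    #include <linux/interrupt.h>

    /* old: the pointer was accepted and never read */
    static void handler_old_sketch(struct softirq_action *h)
    {
            /* drain this CPU's completion list ... */
    }

    /* new: same body, honest signature */
    static void handler_new_sketch(void)
    {
            /* drain this CPU's completion list ... */
    }
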
30928diff --git a/block/blk-map.c b/block/blk-map.c
30929index 623e1cd..ca1e109 100644
30930--- a/block/blk-map.c
30931+++ b/block/blk-map.c
30932@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
30933 if (!len || !kbuf)
30934 return -EINVAL;
30935
30936- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
30937+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
30938 if (do_copy)
30939 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
30940 else
30941diff --git a/block/blk-softirq.c b/block/blk-softirq.c
30942index 467c8de..f3628c5 100644
30943--- a/block/blk-softirq.c
30944+++ b/block/blk-softirq.c
30945@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
30946 * Softirq action handler - move entries to local list and loop over them
30947 * while passing them to the queue registered handler.
30948 */
30949-static void blk_done_softirq(struct softirq_action *h)
30950+static void blk_done_softirq(void)
30951 {
30952 struct list_head *cpu_list, local_list;
30953
30954@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
30955 return NOTIFY_OK;
30956 }
30957
30958-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
30959+static struct notifier_block blk_cpu_notifier = {
30960 .notifier_call = blk_cpu_notify,
30961 };
30962
30963diff --git a/block/bsg.c b/block/bsg.c
30964index ff64ae3..593560c 100644
30965--- a/block/bsg.c
30966+++ b/block/bsg.c
30967@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
30968 struct sg_io_v4 *hdr, struct bsg_device *bd,
30969 fmode_t has_write_perm)
30970 {
30971+ unsigned char tmpcmd[sizeof(rq->__cmd)];
30972+ unsigned char *cmdptr;
30973+
30974 if (hdr->request_len > BLK_MAX_CDB) {
30975 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
30976 if (!rq->cmd)
30977 return -ENOMEM;
30978- }
30979+ cmdptr = rq->cmd;
30980+ } else
30981+ cmdptr = tmpcmd;
30982
30983- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
30984+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
30985 hdr->request_len))
30986 return -EFAULT;
30987
30988+ if (cmdptr != rq->cmd)
30989+ memcpy(rq->cmd, cmdptr, hdr->request_len);
30990+
30991 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
30992 if (blk_verify_command(rq->cmd, has_write_perm))
30993 return -EPERM;
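
blk_fill_sgv4_hdr_rq() now stages the CDB through an on-stack buffer whenever rq->cmd still points at the small __cmd array embedded in struct request. copy_from_user() into an array living inside a larger object is exactly what PaX's USERCOPY checking rejects (an assumption consistent with the rest of this patch set); copying into a stack buffer of exactly known size and then memcpy()ing into place keeps the userspace copy fully bounds-checkable. scsi_ioctl.c below repeats the pattern twice. Reduced to its essentials:

    #include <linux/string.h>
    #include <linux/uaccess.h>

    /* Sketch: dst may be embedded in a larger struct; tmp is a stack
     * object whose size the usercopy checker can verify exactly.
     * The 16 mirrors sizeof(rq->__cmd) in the hunk (BLK_MAX_CDB). */
    static int fill_cmd_sketch(unsigned char *dst, size_t dst_size,
                               const void __user *src, size_t len)
    {
            unsigned char tmp[16];

            if (len > sizeof(tmp) || len > dst_size)
                    return -EINVAL;
            if (copy_from_user(tmp, src, len))
                    return -EFAULT;
            memcpy(dst, tmp, len);
            return 0;
    }
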
30994diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
30995index 7c668c8..db3521c 100644
30996--- a/block/compat_ioctl.c
30997+++ b/block/compat_ioctl.c
30998@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
30999 err |= __get_user(f->spec1, &uf->spec1);
31000 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
31001 err |= __get_user(name, &uf->name);
31002- f->name = compat_ptr(name);
31003+ f->name = (void __force_kernel *)compat_ptr(name);
31004 if (err) {
31005 err = -EFAULT;
31006 goto out;
31007diff --git a/block/partitions/efi.c b/block/partitions/efi.c
31008index b62fb88..bdab4c4 100644
31009--- a/block/partitions/efi.c
31010+++ b/block/partitions/efi.c
31011@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
31012 if (!gpt)
31013 return NULL;
31014
31015+ if (!le32_to_cpu(gpt->num_partition_entries))
31016+ return NULL;
31017+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
31018+ if (!pte)
31019+ return NULL;
31020+
31021 count = le32_to_cpu(gpt->num_partition_entries) *
31022 le32_to_cpu(gpt->sizeof_partition_entry);
31023- if (!count)
31024- return NULL;
31025- pte = kzalloc(count, GFP_KERNEL);
31026- if (!pte)
31027- return NULL;
31028-
31029 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
31030 (u8 *) pte,
31031 count) < count) {
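
The efi.c hunk swaps an unchecked multiplication feeding kzalloc() for kcalloc(). num_partition_entries and sizeof_partition_entry both come straight from the on-disk GPT header, so their product can wrap and yield an undersized allocation that the subsequent read_lba() would overflow; kcalloc() fails cleanly on multiplication overflow, and the hoisted zero check keeps the old early return. In isolation:

    #include <linux/slab.h>

    /* Sketch: both operands are attacker-influenced on-disk values. */
    static void *alloc_entries_sketch(u32 nents, u32 entsz)
    {
            if (!nents)
                    return NULL;
            /* returns NULL if nents * entsz overflows, rather than
             * silently allocating a short buffer */
            return kcalloc(nents, entsz, GFP_KERNEL);
    }
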
31032diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
31033index 9a87daa..fb17486 100644
31034--- a/block/scsi_ioctl.c
31035+++ b/block/scsi_ioctl.c
31036@@ -223,8 +223,20 @@ EXPORT_SYMBOL(blk_verify_command);
31037 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
31038 struct sg_io_hdr *hdr, fmode_t mode)
31039 {
31040- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
31041+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31042+ unsigned char *cmdptr;
31043+
31044+ if (rq->cmd != rq->__cmd)
31045+ cmdptr = rq->cmd;
31046+ else
31047+ cmdptr = tmpcmd;
31048+
31049+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
31050 return -EFAULT;
31051+
31052+ if (cmdptr != rq->cmd)
31053+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
31054+
31055 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
31056 return -EPERM;
31057
31058@@ -433,6 +445,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31059 int err;
31060 unsigned int in_len, out_len, bytes, opcode, cmdlen;
31061 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
31062+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31063+ unsigned char *cmdptr;
31064
31065 if (!sic)
31066 return -EINVAL;
31067@@ -466,9 +480,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31068 */
31069 err = -EFAULT;
31070 rq->cmd_len = cmdlen;
31071- if (copy_from_user(rq->cmd, sic->data, cmdlen))
31072+
31073+ if (rq->cmd != rq->__cmd)
31074+ cmdptr = rq->cmd;
31075+ else
31076+ cmdptr = tmpcmd;
31077+
31078+ if (copy_from_user(cmdptr, sic->data, cmdlen))
31079 goto error;
31080
31081+ if (rq->cmd != cmdptr)
31082+ memcpy(rq->cmd, cmdptr, cmdlen);
31083+
31084 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
31085 goto error;
31086
31087diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
31088index 533de95..7d4a8d2 100644
31089--- a/crypto/ablkcipher.c
31090+++ b/crypto/ablkcipher.c
31091@@ -388,9 +388,9 @@ static int crypto_ablkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
31092 {
31093 struct crypto_report_blkcipher rblkcipher;
31094
31095- snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "ablkcipher");
31096- snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
31097- alg->cra_ablkcipher.geniv ?: "<default>");
31098+ strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
31099+ strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
31100+ sizeof(rblkcipher.geniv));
31101
31102 rblkcipher.blocksize = alg->cra_blocksize;
31103 rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
31104@@ -469,9 +469,9 @@ static int crypto_givcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
31105 {
31106 struct crypto_report_blkcipher rblkcipher;
31107
31108- snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "givcipher");
31109- snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
31110- alg->cra_ablkcipher.geniv ?: "<built-in>");
31111+ strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
31112+ strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
31113+ sizeof(rblkcipher.geniv));
31114
31115 rblkcipher.blocksize = alg->cra_blocksize;
31116 rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
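
This snprintf()-to-strncpy() conversion repeats across ablkcipher.c, aead.c, ahash.c, blkcipher.c, crypto_user.c, pcompress.c, rng.c and shash.c. The report structures are fixed-size and copied whole to userspace over netlink; snprintf() NUL-terminates but leaves every byte past the terminator uninitialized, so stale stack data would leak with each report. strncpy(), whatever its reputation, zero-fills the destination out to the given size, which is exactly the property needed here. The distinction, in brief:

    #include <linux/string.h>

    static void fill_report_field_sketch(char *dst, size_t dst_size,
                                         const char *src)
    {
            /* pads dst with '\0' up to dst_size when src is shorter, so
             * copying the enclosing struct to userspace leaks nothing;
             * the hunks rely on src always fitting, so truncation (and
             * strncpy's missing terminator in that case) never arises */
            strncpy(dst, src, dst_size);
    }
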
31117diff --git a/crypto/aead.c b/crypto/aead.c
31118index 0b8121e..27bc487 100644
31119--- a/crypto/aead.c
31120+++ b/crypto/aead.c
31121@@ -117,9 +117,8 @@ static int crypto_aead_report(struct sk_buff *skb, struct crypto_alg *alg)
31122 struct crypto_report_aead raead;
31123 struct aead_alg *aead = &alg->cra_aead;
31124
31125- snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "aead");
31126- snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s",
31127- aead->geniv ?: "<built-in>");
31128+ strncpy(raead.type, "aead", sizeof(raead.type));
31129+ strncpy(raead.geniv, aead->geniv ?: "<built-in>", sizeof(raead.geniv));
31130
31131 raead.blocksize = alg->cra_blocksize;
31132 raead.maxauthsize = aead->maxauthsize;
31133@@ -203,8 +202,8 @@ static int crypto_nivaead_report(struct sk_buff *skb, struct crypto_alg *alg)
31134 struct crypto_report_aead raead;
31135 struct aead_alg *aead = &alg->cra_aead;
31136
31137- snprintf(raead.type, CRYPTO_MAX_ALG_NAME, "%s", "nivaead");
31138- snprintf(raead.geniv, CRYPTO_MAX_ALG_NAME, "%s", aead->geniv);
31139+ strncpy(raead.type, "nivaead", sizeof(raead.type));
31140+ strncpy(raead.geniv, aead->geniv, sizeof(raead.geniv));
31141
31142 raead.blocksize = alg->cra_blocksize;
31143 raead.maxauthsize = aead->maxauthsize;
31144diff --git a/crypto/ahash.c b/crypto/ahash.c
31145index 3887856..793a27f 100644
31146--- a/crypto/ahash.c
31147+++ b/crypto/ahash.c
31148@@ -404,7 +404,7 @@ static int crypto_ahash_report(struct sk_buff *skb, struct crypto_alg *alg)
31149 {
31150 struct crypto_report_hash rhash;
31151
31152- snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "ahash");
31153+ strncpy(rhash.type, "ahash", sizeof(rhash.type));
31154
31155 rhash.blocksize = alg->cra_blocksize;
31156 rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;
31157diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
31158index a8d85a1..c44e014 100644
31159--- a/crypto/blkcipher.c
31160+++ b/crypto/blkcipher.c
31161@@ -499,9 +499,9 @@ static int crypto_blkcipher_report(struct sk_buff *skb, struct crypto_alg *alg)
31162 {
31163 struct crypto_report_blkcipher rblkcipher;
31164
31165- snprintf(rblkcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "blkcipher");
31166- snprintf(rblkcipher.geniv, CRYPTO_MAX_ALG_NAME, "%s",
31167- alg->cra_blkcipher.geniv ?: "<default>");
31168+ strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
31169+ strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
31170+ sizeof(rblkcipher.geniv));
31171
31172 rblkcipher.blocksize = alg->cra_blocksize;
31173 rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
31174diff --git a/crypto/cryptd.c b/crypto/cryptd.c
31175index 7bdd61b..afec999 100644
31176--- a/crypto/cryptd.c
31177+++ b/crypto/cryptd.c
31178@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
31179
31180 struct cryptd_blkcipher_request_ctx {
31181 crypto_completion_t complete;
31182-};
31183+} __no_const;
31184
31185 struct cryptd_hash_ctx {
31186 struct crypto_shash *child;
31187@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
31188
31189 struct cryptd_aead_request_ctx {
31190 crypto_completion_t complete;
31191-};
31192+} __no_const;
31193
31194 static void cryptd_queue_worker(struct work_struct *work);
31195
31196diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
31197index 35d700a..dfd511f 100644
31198--- a/crypto/crypto_user.c
31199+++ b/crypto/crypto_user.c
31200@@ -30,6 +30,8 @@
31201
31202 #include "internal.h"
31203
31204+#define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))
31205+
31206 static DEFINE_MUTEX(crypto_cfg_mutex);
31207
31208 /* The crypto netlink socket */
31209@@ -75,7 +77,7 @@ static int crypto_report_cipher(struct sk_buff *skb, struct crypto_alg *alg)
31210 {
31211 struct crypto_report_cipher rcipher;
31212
31213- snprintf(rcipher.type, CRYPTO_MAX_ALG_NAME, "%s", "cipher");
31214+ strncpy(rcipher.type, "cipher", sizeof(rcipher.type));
31215
31216 rcipher.blocksize = alg->cra_blocksize;
31217 rcipher.min_keysize = alg->cra_cipher.cia_min_keysize;
31218@@ -94,8 +96,7 @@ static int crypto_report_comp(struct sk_buff *skb, struct crypto_alg *alg)
31219 {
31220 struct crypto_report_comp rcomp;
31221
31222- snprintf(rcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "compression");
31223-
31224+ strncpy(rcomp.type, "compression", sizeof(rcomp.type));
31225 if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
31226 sizeof(struct crypto_report_comp), &rcomp))
31227 goto nla_put_failure;
31228@@ -108,12 +109,14 @@ nla_put_failure:
31229 static int crypto_report_one(struct crypto_alg *alg,
31230 struct crypto_user_alg *ualg, struct sk_buff *skb)
31231 {
31232- memcpy(&ualg->cru_name, &alg->cra_name, sizeof(ualg->cru_name));
31233- memcpy(&ualg->cru_driver_name, &alg->cra_driver_name,
31234- sizeof(ualg->cru_driver_name));
31235- memcpy(&ualg->cru_module_name, module_name(alg->cra_module),
31236- CRYPTO_MAX_ALG_NAME);
31237+ strncpy(ualg->cru_name, alg->cra_name, sizeof(ualg->cru_name));
31238+ strncpy(ualg->cru_driver_name, alg->cra_driver_name,
31239+ sizeof(ualg->cru_driver_name));
31240+ strncpy(ualg->cru_module_name, module_name(alg->cra_module),
31241+ sizeof(ualg->cru_module_name));
31242
31243+ ualg->cru_type = 0;
31244+ ualg->cru_mask = 0;
31245 ualg->cru_flags = alg->cra_flags;
31246 ualg->cru_refcnt = atomic_read(&alg->cra_refcnt);
31247
31248@@ -122,8 +125,7 @@ static int crypto_report_one(struct crypto_alg *alg,
31249 if (alg->cra_flags & CRYPTO_ALG_LARVAL) {
31250 struct crypto_report_larval rl;
31251
31252- snprintf(rl.type, CRYPTO_MAX_ALG_NAME, "%s", "larval");
31253-
31254+ strncpy(rl.type, "larval", sizeof(rl.type));
31255 if (nla_put(skb, CRYPTOCFGA_REPORT_LARVAL,
31256 sizeof(struct crypto_report_larval), &rl))
31257 goto nla_put_failure;
31258@@ -196,7 +198,10 @@ static int crypto_report(struct sk_buff *in_skb, struct nlmsghdr *in_nlh,
31259 struct crypto_dump_info info;
31260 int err;
31261
31262- if (!p->cru_driver_name)
31263+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
31264+ return -EINVAL;
31265+
31266+ if (!p->cru_driver_name[0])
31267 return -EINVAL;
31268
31269 alg = crypto_alg_match(p, 1);
31270@@ -260,6 +265,9 @@ static int crypto_update_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
31271 struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
31272 LIST_HEAD(list);
31273
31274+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
31275+ return -EINVAL;
31276+
31277 if (priority && !strlen(p->cru_driver_name))
31278 return -EINVAL;
31279
31280@@ -287,6 +295,9 @@ static int crypto_del_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
31281 struct crypto_alg *alg;
31282 struct crypto_user_alg *p = nlmsg_data(nlh);
31283
31284+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
31285+ return -EINVAL;
31286+
31287 alg = crypto_alg_match(p, 1);
31288 if (!alg)
31289 return -ENOENT;
31290@@ -368,6 +379,9 @@ static int crypto_add_alg(struct sk_buff *skb, struct nlmsghdr *nlh,
31291 struct crypto_user_alg *p = nlmsg_data(nlh);
31292 struct nlattr *priority = attrs[CRYPTOCFGA_PRIORITY_VAL];
31293
31294+ if (!null_terminated(p->cru_name) || !null_terminated(p->cru_driver_name))
31295+ return -EINVAL;
31296+
31297 if (strlen(p->cru_driver_name))
31298 exact = 1;
31299
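
crypto_user.c gains a null_terminated() check at every entry point that accepts a struct crypto_user_alg from userspace: the name fields arrive as fixed arrays with no guaranteed terminator, and strnlen() bounded by the array size rejects an unterminated name before strlen()/strcmp() inside crypto_alg_match() can run off the end of the structure. (crypto_report_one() is also tightened: strncpy() zero-pads the outgoing names and cru_type/cru_mask are cleared, so no kernel bytes ride back in the reply.) The check only works on true arrays, where sizeof yields the storage size:

    #include <linux/string.h>

    #define null_terminated(x) (strnlen(x, sizeof(x)) < sizeof(x))

    struct ualg_sketch {            /* stand-in for crypto_user_alg */
            char cru_name[64];
            char cru_driver_name[64];
    };

    static int names_ok_sketch(const struct ualg_sketch *p)
    {
            return null_terminated(p->cru_name) &&
                   null_terminated(p->cru_driver_name);
    }
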
31300diff --git a/crypto/pcompress.c b/crypto/pcompress.c
31301index 04e083f..7140fe7 100644
31302--- a/crypto/pcompress.c
31303+++ b/crypto/pcompress.c
31304@@ -53,8 +53,7 @@ static int crypto_pcomp_report(struct sk_buff *skb, struct crypto_alg *alg)
31305 {
31306 struct crypto_report_comp rpcomp;
31307
31308- snprintf(rpcomp.type, CRYPTO_MAX_ALG_NAME, "%s", "pcomp");
31309-
31310+ strncpy(rpcomp.type, "pcomp", sizeof(rpcomp.type));
31311 if (nla_put(skb, CRYPTOCFGA_REPORT_COMPRESS,
31312 sizeof(struct crypto_report_comp), &rpcomp))
31313 goto nla_put_failure;
31314diff --git a/crypto/rng.c b/crypto/rng.c
31315index f3b7894..e0a25c2 100644
31316--- a/crypto/rng.c
31317+++ b/crypto/rng.c
31318@@ -65,7 +65,7 @@ static int crypto_rng_report(struct sk_buff *skb, struct crypto_alg *alg)
31319 {
31320 struct crypto_report_rng rrng;
31321
31322- snprintf(rrng.type, CRYPTO_MAX_ALG_NAME, "%s", "rng");
31323+ strncpy(rrng.type, "rng", sizeof(rrng.type));
31324
31325 rrng.seedsize = alg->cra_rng.seedsize;
31326
31327diff --git a/crypto/shash.c b/crypto/shash.c
31328index f426330f..929058a 100644
31329--- a/crypto/shash.c
31330+++ b/crypto/shash.c
31331@@ -530,7 +530,8 @@ static int crypto_shash_report(struct sk_buff *skb, struct crypto_alg *alg)
31332 struct crypto_report_hash rhash;
31333 struct shash_alg *salg = __crypto_shash_alg(alg);
31334
31335- snprintf(rhash.type, CRYPTO_MAX_ALG_NAME, "%s", "shash");
31336+ strncpy(rhash.type, "shash", sizeof(rhash.type));
31337+
31338 rhash.blocksize = alg->cra_blocksize;
31339 rhash.digestsize = salg->digestsize;
31340
31341diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
31342index f220d64..d359ad6 100644
31343--- a/drivers/acpi/apei/apei-internal.h
31344+++ b/drivers/acpi/apei/apei-internal.h
31345@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
31346 struct apei_exec_ins_type {
31347 u32 flags;
31348 apei_exec_ins_func_t run;
31349-};
31350+} __do_const;
31351
31352 struct apei_exec_context {
31353 u32 ip;
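
apei-internal.h tags struct apei_exec_ins_type with __do_const. Under the constify GCC plugin (assumed from the rest of this patch set), a structure of function pointers marked this way has all of its instances placed in read-only memory, so an exploit can no longer redirect the .run pointer of an APEI instruction table at runtime. Conceptually:

    #include <linux/types.h>

    /* __do_const is a plugin-provided attribute, not standard C; with
     * the plugin absent it would be defined away to nothing. */
    #ifndef __do_const
    #define __do_const
    #endif

    struct ins_type_sketch {
            u32 flags;
            int (*run)(void *ctx, void *entry);  /* now write-protected */
    } __do_const;
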
31354diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
31355index e6defd8..c26a225 100644
31356--- a/drivers/acpi/apei/cper.c
31357+++ b/drivers/acpi/apei/cper.c
31358@@ -38,12 +38,12 @@
31359 */
31360 u64 cper_next_record_id(void)
31361 {
31362- static atomic64_t seq;
31363+ static atomic64_unchecked_t seq;
31364
31365- if (!atomic64_read(&seq))
31366- atomic64_set(&seq, ((u64)get_seconds()) << 32);
31367+ if (!atomic64_read_unchecked(&seq))
31368+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
31369
31370- return atomic64_inc_return(&seq);
31371+ return atomic64_inc_return_unchecked(&seq);
31372 }
31373 EXPORT_SYMBOL_GPL(cper_next_record_id);
31374
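
cper_next_record_id() keeps a monotonically increasing ID in an atomic64_t. PaX's REFCOUNT hardening (assumed here) makes ordinary atomic arithmetic trap on overflow, which is right for reference counts but wrong for an ID generator that may legitimately wrap; the *_unchecked variants keep the atomicity and drop the overflow instrumentation. The converted shape:

    /* Sketch: an ID source, not a refcount -- wraparound is harmless,
     * so the unchecked accessors from this patch set are used. */
    static atomic64_unchecked_t seq_sketch;

    static u64 next_record_id_sketch(u64 epoch)
    {
            if (!atomic64_read_unchecked(&seq_sketch))
                    atomic64_set_unchecked(&seq_sketch, epoch << 32);
            return atomic64_inc_return_unchecked(&seq_sketch);
    }
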
31375diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
31376index be60399..778b33e8 100644
31377--- a/drivers/acpi/bgrt.c
31378+++ b/drivers/acpi/bgrt.c
31379@@ -87,8 +87,10 @@ static int __init bgrt_init(void)
31380 return -ENODEV;
31381
31382 sysfs_bin_attr_init(&image_attr);
31383- image_attr.private = bgrt_image;
31384- image_attr.size = bgrt_image_size;
31385+ pax_open_kernel();
31386+ *(void **)&image_attr.private = bgrt_image;
31387+ *(size_t *)&image_attr.size = bgrt_image_size;
31388+ pax_close_kernel();
31389
31390 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
31391 if (!bgrt_kobj)
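
bgrt.c initializes fields of a bin_attribute that constification has made read-only, so the two stores are wrapped in pax_open_kernel()/pax_close_kernel() (this patch set's window for briefly lifting kernel write protection -- an assumption from this excerpt) and go through pointer casts that defeat the const qualification. The same construct appears in libata-core.c and pata_arasan_cf.c further down. In outline:

    #include <linux/sysfs.h>

    static void set_bin_attr_sketch(struct bin_attribute *attr,
                                    void *image, size_t size)
    {
            pax_open_kernel();                  /* make the object writable */
            *(void **)&attr->private = image;   /* casts bypass constify */
            *(size_t *)&attr->size   = size;
            pax_close_kernel();                 /* restore protection */
    }
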
31392diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
31393index cb96296..2d6082b 100644
31394--- a/drivers/acpi/blacklist.c
31395+++ b/drivers/acpi/blacklist.c
31396@@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
31397 return 0;
31398 }
31399
31400-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
31401+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
31402 {
31403 .callback = dmi_disable_osi_vista,
31404 .ident = "Fujitsu Siemens",
31405diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
31406index 7586544..636a2f0 100644
31407--- a/drivers/acpi/ec_sys.c
31408+++ b/drivers/acpi/ec_sys.c
31409@@ -12,6 +12,7 @@
31410 #include <linux/acpi.h>
31411 #include <linux/debugfs.h>
31412 #include <linux/module.h>
31413+#include <linux/uaccess.h>
31414 #include "internal.h"
31415
31416 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
31417@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
31418 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
31419 */
31420 unsigned int size = EC_SPACE_SIZE;
31421- u8 *data = (u8 *) buf;
31422+ u8 data;
31423 loff_t init_off = *off;
31424 int err = 0;
31425
31426@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
31427 size = count;
31428
31429 while (size) {
31430- err = ec_read(*off, &data[*off - init_off]);
31431+ err = ec_read(*off, &data);
31432 if (err)
31433 return err;
31434+ if (put_user(data, &buf[*off - init_off]))
31435+ return -EFAULT;
31436 *off += 1;
31437 size--;
31438 }
31439@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
31440
31441 unsigned int size = count;
31442 loff_t init_off = *off;
31443- u8 *data = (u8 *) buf;
31444 int err = 0;
31445
31446 if (*off >= EC_SPACE_SIZE)
31447@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
31448 }
31449
31450 while (size) {
31451- u8 byte_write = data[*off - init_off];
31452+ u8 byte_write;
31453+ if (get_user(byte_write, &buf[*off - init_off]))
31454+ return -EFAULT;
31455 err = ec_write(*off, byte_write);
31456 if (err)
31457 return err;
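
The ec_sys.c debugfs handlers previously cast the __user buffer to a plain u8 * and indexed it directly -- a raw user-pointer dereference from kernel context. The fix moves each byte through put_user()/get_user(), which perform the access validation and fault handling, hence the new <linux/uaccess.h> include. The read side, reduced:

    #include <linux/uaccess.h>

    /* Sketch: ec_read() is the real EC accessor; size clamping and the
     * loff_t bookkeeping of the original are elided. */
    static ssize_t ec_read_io_sketch(char __user *buf, u8 off, size_t n)
    {
            size_t i;

            for (i = 0; i < n; i++) {
                    u8 byte;
                    int err = ec_read(off + i, &byte);

                    if (err)
                            return err;
                    if (put_user(byte, &buf[i]))  /* checked user store */
                            return -EFAULT;
            }
            return n;
    }
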
31458diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c
31459index e83311b..142b5cc 100644
31460--- a/drivers/acpi/processor_driver.c
31461+++ b/drivers/acpi/processor_driver.c
31462@@ -558,7 +558,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
31463 return 0;
31464 #endif
31465
31466- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
31467+ BUG_ON(pr->id >= nr_cpu_ids);
31468
31469 /*
31470 * Buggy BIOS check
31471diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
31472index ed9a1cc..f4a354c 100644
31473--- a/drivers/acpi/processor_idle.c
31474+++ b/drivers/acpi/processor_idle.c
31475@@ -1005,7 +1005,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
31476 {
31477 int i, count = CPUIDLE_DRIVER_STATE_START;
31478 struct acpi_processor_cx *cx;
31479- struct cpuidle_state *state;
31480+ cpuidle_state_no_const *state;
31481 struct cpuidle_driver *drv = &acpi_idle_driver;
31482
31483 if (!pr->flags.power_setup_done)
31484diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
31485index ea61ca9..3fdd70d 100644
31486--- a/drivers/acpi/sysfs.c
31487+++ b/drivers/acpi/sysfs.c
31488@@ -420,11 +420,11 @@ static u32 num_counters;
31489 static struct attribute **all_attrs;
31490 static u32 acpi_gpe_count;
31491
31492-static struct attribute_group interrupt_stats_attr_group = {
31493+static attribute_group_no_const interrupt_stats_attr_group = {
31494 .name = "interrupts",
31495 };
31496
31497-static struct kobj_attribute *counter_attrs;
31498+static kobj_attribute_no_const *counter_attrs;
31499
31500 static void delete_gpe_attr_array(void)
31501 {
31502diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
31503index 46cd3f4..0871ad0 100644
31504--- a/drivers/ata/libata-core.c
31505+++ b/drivers/ata/libata-core.c
31506@@ -4780,7 +4780,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
31507 struct ata_port *ap;
31508 unsigned int tag;
31509
31510- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31511+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31512 ap = qc->ap;
31513
31514 qc->flags = 0;
31515@@ -4796,7 +4796,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
31516 struct ata_port *ap;
31517 struct ata_link *link;
31518
31519- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31520+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
31521 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
31522 ap = qc->ap;
31523 link = qc->dev->link;
31524@@ -5892,6 +5892,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
31525 return;
31526
31527 spin_lock(&lock);
31528+ pax_open_kernel();
31529
31530 for (cur = ops->inherits; cur; cur = cur->inherits) {
31531 void **inherit = (void **)cur;
31532@@ -5905,8 +5906,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
31533 if (IS_ERR(*pp))
31534 *pp = NULL;
31535
31536- ops->inherits = NULL;
31537+ *(struct ata_port_operations **)&ops->inherits = NULL;
31538
31539+ pax_close_kernel();
31540 spin_unlock(&lock);
31541 }
31542
31543diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
31544index 405022d..fb70e53 100644
31545--- a/drivers/ata/pata_arasan_cf.c
31546+++ b/drivers/ata/pata_arasan_cf.c
31547@@ -864,7 +864,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
31548 /* Handle platform specific quirks */
31549 if (pdata->quirk) {
31550 if (pdata->quirk & CF_BROKEN_PIO) {
31551- ap->ops->set_piomode = NULL;
31552+ pax_open_kernel();
31553+ *(void **)&ap->ops->set_piomode = NULL;
31554+ pax_close_kernel();
31555 ap->pio_mask = 0;
31556 }
31557 if (pdata->quirk & CF_BROKEN_MWDMA)
31558diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
31559index f9b983a..887b9d8 100644
31560--- a/drivers/atm/adummy.c
31561+++ b/drivers/atm/adummy.c
31562@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
31563 vcc->pop(vcc, skb);
31564 else
31565 dev_kfree_skb_any(skb);
31566- atomic_inc(&vcc->stats->tx);
31567+ atomic_inc_unchecked(&vcc->stats->tx);
31568
31569 return 0;
31570 }
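
From adummy.c onward, every ATM driver in the tree receives the same mechanical substitution: the vcc->stats counters (tx, rx, tx_err, rx_err, rx_drop, plus the SONET counters printed in iphase.c) become atomic_unchecked_t in a companion header change not shown in this excerpt, so each access site must switch to the matching *_unchecked accessor. These are pure statistics, free to wrap, which is why they opt out of REFCOUNT overflow trapping; without PaX the unchecked types degrade to the plain atomics. One representative site:

    struct stats_sketch {
            atomic_unchecked_t tx;          /* a statistic, not a refcount */
    };

    static void count_tx_sketch(struct stats_sketch *s)
    {
            atomic_inc_unchecked(&s->tx);   /* wraps silently by design */
    }
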
31571diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
31572index 77a7480..05cde58 100644
31573--- a/drivers/atm/ambassador.c
31574+++ b/drivers/atm/ambassador.c
31575@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
31576 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
31577
31578 // VC layer stats
31579- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31580+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31581
31582 // free the descriptor
31583 kfree (tx_descr);
31584@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
31585 dump_skb ("<<<", vc, skb);
31586
31587 // VC layer stats
31588- atomic_inc(&atm_vcc->stats->rx);
31589+ atomic_inc_unchecked(&atm_vcc->stats->rx);
31590 __net_timestamp(skb);
31591 // end of our responsibility
31592 atm_vcc->push (atm_vcc, skb);
31593@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
31594 } else {
31595 PRINTK (KERN_INFO, "dropped over-size frame");
31596 // should we count this?
31597- atomic_inc(&atm_vcc->stats->rx_drop);
31598+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31599 }
31600
31601 } else {
31602@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
31603 }
31604
31605 if (check_area (skb->data, skb->len)) {
31606- atomic_inc(&atm_vcc->stats->tx_err);
31607+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
31608 return -ENOMEM; // ?
31609 }
31610
31611diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
31612index b22d71c..d6e1049 100644
31613--- a/drivers/atm/atmtcp.c
31614+++ b/drivers/atm/atmtcp.c
31615@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31616 if (vcc->pop) vcc->pop(vcc,skb);
31617 else dev_kfree_skb(skb);
31618 if (dev_data) return 0;
31619- atomic_inc(&vcc->stats->tx_err);
31620+ atomic_inc_unchecked(&vcc->stats->tx_err);
31621 return -ENOLINK;
31622 }
31623 size = skb->len+sizeof(struct atmtcp_hdr);
31624@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31625 if (!new_skb) {
31626 if (vcc->pop) vcc->pop(vcc,skb);
31627 else dev_kfree_skb(skb);
31628- atomic_inc(&vcc->stats->tx_err);
31629+ atomic_inc_unchecked(&vcc->stats->tx_err);
31630 return -ENOBUFS;
31631 }
31632 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
31633@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
31634 if (vcc->pop) vcc->pop(vcc,skb);
31635 else dev_kfree_skb(skb);
31636 out_vcc->push(out_vcc,new_skb);
31637- atomic_inc(&vcc->stats->tx);
31638- atomic_inc(&out_vcc->stats->rx);
31639+ atomic_inc_unchecked(&vcc->stats->tx);
31640+ atomic_inc_unchecked(&out_vcc->stats->rx);
31641 return 0;
31642 }
31643
31644@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
31645 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
31646 read_unlock(&vcc_sklist_lock);
31647 if (!out_vcc) {
31648- atomic_inc(&vcc->stats->tx_err);
31649+ atomic_inc_unchecked(&vcc->stats->tx_err);
31650 goto done;
31651 }
31652 skb_pull(skb,sizeof(struct atmtcp_hdr));
31653@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
31654 __net_timestamp(new_skb);
31655 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
31656 out_vcc->push(out_vcc,new_skb);
31657- atomic_inc(&vcc->stats->tx);
31658- atomic_inc(&out_vcc->stats->rx);
31659+ atomic_inc_unchecked(&vcc->stats->tx);
31660+ atomic_inc_unchecked(&out_vcc->stats->rx);
31661 done:
31662 if (vcc->pop) vcc->pop(vcc,skb);
31663 else dev_kfree_skb(skb);
31664diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
31665index c1eb6fa..4c71be9 100644
31666--- a/drivers/atm/eni.c
31667+++ b/drivers/atm/eni.c
31668@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
31669 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
31670 vcc->dev->number);
31671 length = 0;
31672- atomic_inc(&vcc->stats->rx_err);
31673+ atomic_inc_unchecked(&vcc->stats->rx_err);
31674 }
31675 else {
31676 length = ATM_CELL_SIZE-1; /* no HEC */
31677@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
31678 size);
31679 }
31680 eff = length = 0;
31681- atomic_inc(&vcc->stats->rx_err);
31682+ atomic_inc_unchecked(&vcc->stats->rx_err);
31683 }
31684 else {
31685 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
31686@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
31687 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
31688 vcc->dev->number,vcc->vci,length,size << 2,descr);
31689 length = eff = 0;
31690- atomic_inc(&vcc->stats->rx_err);
31691+ atomic_inc_unchecked(&vcc->stats->rx_err);
31692 }
31693 }
31694 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
31695@@ -767,7 +767,7 @@ rx_dequeued++;
31696 vcc->push(vcc,skb);
31697 pushed++;
31698 }
31699- atomic_inc(&vcc->stats->rx);
31700+ atomic_inc_unchecked(&vcc->stats->rx);
31701 }
31702 wake_up(&eni_dev->rx_wait);
31703 }
31704@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
31705 PCI_DMA_TODEVICE);
31706 if (vcc->pop) vcc->pop(vcc,skb);
31707 else dev_kfree_skb_irq(skb);
31708- atomic_inc(&vcc->stats->tx);
31709+ atomic_inc_unchecked(&vcc->stats->tx);
31710 wake_up(&eni_dev->tx_wait);
31711 dma_complete++;
31712 }
31713diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
31714index b41c948..a002b17 100644
31715--- a/drivers/atm/firestream.c
31716+++ b/drivers/atm/firestream.c
31717@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
31718 }
31719 }
31720
31721- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31722+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31723
31724 fs_dprintk (FS_DEBUG_TXMEM, "i");
31725 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
31726@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
31727 #endif
31728 skb_put (skb, qe->p1 & 0xffff);
31729 ATM_SKB(skb)->vcc = atm_vcc;
31730- atomic_inc(&atm_vcc->stats->rx);
31731+ atomic_inc_unchecked(&atm_vcc->stats->rx);
31732 __net_timestamp(skb);
31733 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
31734 atm_vcc->push (atm_vcc, skb);
31735@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
31736 kfree (pe);
31737 }
31738 if (atm_vcc)
31739- atomic_inc(&atm_vcc->stats->rx_drop);
31740+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31741 break;
31742 case 0x1f: /* Reassembly abort: no buffers. */
31743 /* Silently increment error counter. */
31744 if (atm_vcc)
31745- atomic_inc(&atm_vcc->stats->rx_drop);
31746+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
31747 break;
31748 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
31749 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
31750diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
31751index 204814e..cede831 100644
31752--- a/drivers/atm/fore200e.c
31753+++ b/drivers/atm/fore200e.c
31754@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
31755 #endif
31756 /* check error condition */
31757 if (*entry->status & STATUS_ERROR)
31758- atomic_inc(&vcc->stats->tx_err);
31759+ atomic_inc_unchecked(&vcc->stats->tx_err);
31760 else
31761- atomic_inc(&vcc->stats->tx);
31762+ atomic_inc_unchecked(&vcc->stats->tx);
31763 }
31764 }
31765
31766@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
31767 if (skb == NULL) {
31768 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
31769
31770- atomic_inc(&vcc->stats->rx_drop);
31771+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31772 return -ENOMEM;
31773 }
31774
31775@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
31776
31777 dev_kfree_skb_any(skb);
31778
31779- atomic_inc(&vcc->stats->rx_drop);
31780+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31781 return -ENOMEM;
31782 }
31783
31784 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
31785
31786 vcc->push(vcc, skb);
31787- atomic_inc(&vcc->stats->rx);
31788+ atomic_inc_unchecked(&vcc->stats->rx);
31789
31790 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
31791
31792@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
31793 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
31794 fore200e->atm_dev->number,
31795 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
31796- atomic_inc(&vcc->stats->rx_err);
31797+ atomic_inc_unchecked(&vcc->stats->rx_err);
31798 }
31799 }
31800
31801@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
31802 goto retry_here;
31803 }
31804
31805- atomic_inc(&vcc->stats->tx_err);
31806+ atomic_inc_unchecked(&vcc->stats->tx_err);
31807
31808 fore200e->tx_sat++;
31809 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
31810diff --git a/drivers/atm/he.c b/drivers/atm/he.c
31811index 72b6960..cf9167a 100644
31812--- a/drivers/atm/he.c
31813+++ b/drivers/atm/he.c
31814@@ -1699,7 +1699,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31815
31816 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
31817 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
31818- atomic_inc(&vcc->stats->rx_drop);
31819+ atomic_inc_unchecked(&vcc->stats->rx_drop);
31820 goto return_host_buffers;
31821 }
31822
31823@@ -1726,7 +1726,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31824 RBRQ_LEN_ERR(he_dev->rbrq_head)
31825 ? "LEN_ERR" : "",
31826 vcc->vpi, vcc->vci);
31827- atomic_inc(&vcc->stats->rx_err);
31828+ atomic_inc_unchecked(&vcc->stats->rx_err);
31829 goto return_host_buffers;
31830 }
31831
31832@@ -1778,7 +1778,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
31833 vcc->push(vcc, skb);
31834 spin_lock(&he_dev->global_lock);
31835
31836- atomic_inc(&vcc->stats->rx);
31837+ atomic_inc_unchecked(&vcc->stats->rx);
31838
31839 return_host_buffers:
31840 ++pdus_assembled;
31841@@ -2104,7 +2104,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
31842 tpd->vcc->pop(tpd->vcc, tpd->skb);
31843 else
31844 dev_kfree_skb_any(tpd->skb);
31845- atomic_inc(&tpd->vcc->stats->tx_err);
31846+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
31847 }
31848 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
31849 return;
31850@@ -2516,7 +2516,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31851 vcc->pop(vcc, skb);
31852 else
31853 dev_kfree_skb_any(skb);
31854- atomic_inc(&vcc->stats->tx_err);
31855+ atomic_inc_unchecked(&vcc->stats->tx_err);
31856 return -EINVAL;
31857 }
31858
31859@@ -2527,7 +2527,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31860 vcc->pop(vcc, skb);
31861 else
31862 dev_kfree_skb_any(skb);
31863- atomic_inc(&vcc->stats->tx_err);
31864+ atomic_inc_unchecked(&vcc->stats->tx_err);
31865 return -EINVAL;
31866 }
31867 #endif
31868@@ -2539,7 +2539,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31869 vcc->pop(vcc, skb);
31870 else
31871 dev_kfree_skb_any(skb);
31872- atomic_inc(&vcc->stats->tx_err);
31873+ atomic_inc_unchecked(&vcc->stats->tx_err);
31874 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31875 return -ENOMEM;
31876 }
31877@@ -2581,7 +2581,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31878 vcc->pop(vcc, skb);
31879 else
31880 dev_kfree_skb_any(skb);
31881- atomic_inc(&vcc->stats->tx_err);
31882+ atomic_inc_unchecked(&vcc->stats->tx_err);
31883 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31884 return -ENOMEM;
31885 }
31886@@ -2612,7 +2612,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
31887 __enqueue_tpd(he_dev, tpd, cid);
31888 spin_unlock_irqrestore(&he_dev->global_lock, flags);
31889
31890- atomic_inc(&vcc->stats->tx);
31891+ atomic_inc_unchecked(&vcc->stats->tx);
31892
31893 return 0;
31894 }
31895diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
31896index 1dc0519..1aadaf7 100644
31897--- a/drivers/atm/horizon.c
31898+++ b/drivers/atm/horizon.c
31899@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
31900 {
31901 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
31902 // VC layer stats
31903- atomic_inc(&vcc->stats->rx);
31904+ atomic_inc_unchecked(&vcc->stats->rx);
31905 __net_timestamp(skb);
31906 // end of our responsibility
31907 vcc->push (vcc, skb);
31908@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
31909 dev->tx_iovec = NULL;
31910
31911 // VC layer stats
31912- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
31913+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
31914
31915 // free the skb
31916 hrz_kfree_skb (skb);
31917diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
31918index 272f009..a18ba55 100644
31919--- a/drivers/atm/idt77252.c
31920+++ b/drivers/atm/idt77252.c
31921@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
31922 else
31923 dev_kfree_skb(skb);
31924
31925- atomic_inc(&vcc->stats->tx);
31926+ atomic_inc_unchecked(&vcc->stats->tx);
31927 }
31928
31929 atomic_dec(&scq->used);
31930@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31931 if ((sb = dev_alloc_skb(64)) == NULL) {
31932 printk("%s: Can't allocate buffers for aal0.\n",
31933 card->name);
31934- atomic_add(i, &vcc->stats->rx_drop);
31935+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
31936 break;
31937 }
31938 if (!atm_charge(vcc, sb->truesize)) {
31939 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
31940 card->name);
31941- atomic_add(i - 1, &vcc->stats->rx_drop);
31942+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
31943 dev_kfree_skb(sb);
31944 break;
31945 }
31946@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31947 ATM_SKB(sb)->vcc = vcc;
31948 __net_timestamp(sb);
31949 vcc->push(vcc, sb);
31950- atomic_inc(&vcc->stats->rx);
31951+ atomic_inc_unchecked(&vcc->stats->rx);
31952
31953 cell += ATM_CELL_PAYLOAD;
31954 }
31955@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31956 "(CDC: %08x)\n",
31957 card->name, len, rpp->len, readl(SAR_REG_CDC));
31958 recycle_rx_pool_skb(card, rpp);
31959- atomic_inc(&vcc->stats->rx_err);
31960+ atomic_inc_unchecked(&vcc->stats->rx_err);
31961 return;
31962 }
31963 if (stat & SAR_RSQE_CRC) {
31964 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
31965 recycle_rx_pool_skb(card, rpp);
31966- atomic_inc(&vcc->stats->rx_err);
31967+ atomic_inc_unchecked(&vcc->stats->rx_err);
31968 return;
31969 }
31970 if (skb_queue_len(&rpp->queue) > 1) {
31971@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31972 RXPRINTK("%s: Can't alloc RX skb.\n",
31973 card->name);
31974 recycle_rx_pool_skb(card, rpp);
31975- atomic_inc(&vcc->stats->rx_err);
31976+ atomic_inc_unchecked(&vcc->stats->rx_err);
31977 return;
31978 }
31979 if (!atm_charge(vcc, skb->truesize)) {
31980@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31981 __net_timestamp(skb);
31982
31983 vcc->push(vcc, skb);
31984- atomic_inc(&vcc->stats->rx);
31985+ atomic_inc_unchecked(&vcc->stats->rx);
31986
31987 return;
31988 }
31989@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
31990 __net_timestamp(skb);
31991
31992 vcc->push(vcc, skb);
31993- atomic_inc(&vcc->stats->rx);
31994+ atomic_inc_unchecked(&vcc->stats->rx);
31995
31996 if (skb->truesize > SAR_FB_SIZE_3)
31997 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
31998@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
31999 if (vcc->qos.aal != ATM_AAL0) {
32000 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
32001 card->name, vpi, vci);
32002- atomic_inc(&vcc->stats->rx_drop);
32003+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32004 goto drop;
32005 }
32006
32007 if ((sb = dev_alloc_skb(64)) == NULL) {
32008 printk("%s: Can't allocate buffers for AAL0.\n",
32009 card->name);
32010- atomic_inc(&vcc->stats->rx_err);
32011+ atomic_inc_unchecked(&vcc->stats->rx_err);
32012 goto drop;
32013 }
32014
32015@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
32016 ATM_SKB(sb)->vcc = vcc;
32017 __net_timestamp(sb);
32018 vcc->push(vcc, sb);
32019- atomic_inc(&vcc->stats->rx);
32020+ atomic_inc_unchecked(&vcc->stats->rx);
32021
32022 drop:
32023 skb_pull(queue, 64);
32024@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32025
32026 if (vc == NULL) {
32027 printk("%s: NULL connection in send().\n", card->name);
32028- atomic_inc(&vcc->stats->tx_err);
32029+ atomic_inc_unchecked(&vcc->stats->tx_err);
32030 dev_kfree_skb(skb);
32031 return -EINVAL;
32032 }
32033 if (!test_bit(VCF_TX, &vc->flags)) {
32034 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
32035- atomic_inc(&vcc->stats->tx_err);
32036+ atomic_inc_unchecked(&vcc->stats->tx_err);
32037 dev_kfree_skb(skb);
32038 return -EINVAL;
32039 }
32040@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32041 break;
32042 default:
32043 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
32044- atomic_inc(&vcc->stats->tx_err);
32045+ atomic_inc_unchecked(&vcc->stats->tx_err);
32046 dev_kfree_skb(skb);
32047 return -EINVAL;
32048 }
32049
32050 if (skb_shinfo(skb)->nr_frags != 0) {
32051 printk("%s: No scatter-gather yet.\n", card->name);
32052- atomic_inc(&vcc->stats->tx_err);
32053+ atomic_inc_unchecked(&vcc->stats->tx_err);
32054 dev_kfree_skb(skb);
32055 return -EINVAL;
32056 }
32057@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32058
32059 err = queue_skb(card, vc, skb, oam);
32060 if (err) {
32061- atomic_inc(&vcc->stats->tx_err);
32062+ atomic_inc_unchecked(&vcc->stats->tx_err);
32063 dev_kfree_skb(skb);
32064 return err;
32065 }
32066@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
32067 skb = dev_alloc_skb(64);
32068 if (!skb) {
32069 printk("%s: Out of memory in send_oam().\n", card->name);
32070- atomic_inc(&vcc->stats->tx_err);
32071+ atomic_inc_unchecked(&vcc->stats->tx_err);
32072 return -ENOMEM;
32073 }
32074 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
32075diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
32076index 4217f29..88f547a 100644
32077--- a/drivers/atm/iphase.c
32078+++ b/drivers/atm/iphase.c
32079@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
32080 status = (u_short) (buf_desc_ptr->desc_mode);
32081 if (status & (RX_CER | RX_PTE | RX_OFL))
32082 {
32083- atomic_inc(&vcc->stats->rx_err);
32084+ atomic_inc_unchecked(&vcc->stats->rx_err);
32085 IF_ERR(printk("IA: bad packet, dropping it");)
32086 if (status & RX_CER) {
32087 IF_ERR(printk(" cause: packet CRC error\n");)
32088@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
32089 len = dma_addr - buf_addr;
32090 if (len > iadev->rx_buf_sz) {
32091 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
32092- atomic_inc(&vcc->stats->rx_err);
32093+ atomic_inc_unchecked(&vcc->stats->rx_err);
32094 goto out_free_desc;
32095 }
32096
32097@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32098 ia_vcc = INPH_IA_VCC(vcc);
32099 if (ia_vcc == NULL)
32100 {
32101- atomic_inc(&vcc->stats->rx_err);
32102+ atomic_inc_unchecked(&vcc->stats->rx_err);
32103 atm_return(vcc, skb->truesize);
32104 dev_kfree_skb_any(skb);
32105 goto INCR_DLE;
32106@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32107 if ((length > iadev->rx_buf_sz) || (length >
32108 (skb->len - sizeof(struct cpcs_trailer))))
32109 {
32110- atomic_inc(&vcc->stats->rx_err);
32111+ atomic_inc_unchecked(&vcc->stats->rx_err);
32112 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
32113 length, skb->len);)
32114 atm_return(vcc, skb->truesize);
32115@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32116
32117 IF_RX(printk("rx_dle_intr: skb push");)
32118 vcc->push(vcc,skb);
32119- atomic_inc(&vcc->stats->rx);
32120+ atomic_inc_unchecked(&vcc->stats->rx);
32121 iadev->rx_pkt_cnt++;
32122 }
32123 INCR_DLE:
32124@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
32125 {
32126 struct k_sonet_stats *stats;
32127 stats = &PRIV(_ia_dev[board])->sonet_stats;
32128- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
32129- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
32130- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
32131- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
32132- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
32133- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
32134- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
32135- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
32136- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
32137+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
32138+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
32139+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
32140+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
32141+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
32142+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
32143+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
32144+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
32145+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
32146 }
32147 ia_cmds.status = 0;
32148 break;
32149@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32150 if ((desc == 0) || (desc > iadev->num_tx_desc))
32151 {
32152 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
32153- atomic_inc(&vcc->stats->tx);
32154+ atomic_inc_unchecked(&vcc->stats->tx);
32155 if (vcc->pop)
32156 vcc->pop(vcc, skb);
32157 else
32158@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32159 ATM_DESC(skb) = vcc->vci;
32160 skb_queue_tail(&iadev->tx_dma_q, skb);
32161
32162- atomic_inc(&vcc->stats->tx);
32163+ atomic_inc_unchecked(&vcc->stats->tx);
32164 iadev->tx_pkt_cnt++;
32165 /* Increment transaction counter */
32166 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
32167
32168 #if 0
32169 /* add flow control logic */
32170- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
32171+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
32172 if (iavcc->vc_desc_cnt > 10) {
32173 vcc->tx_quota = vcc->tx_quota * 3 / 4;
32174 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
32175diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
32176index fa7d701..1e404c7 100644
32177--- a/drivers/atm/lanai.c
32178+++ b/drivers/atm/lanai.c
32179@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
32180 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
32181 lanai_endtx(lanai, lvcc);
32182 lanai_free_skb(lvcc->tx.atmvcc, skb);
32183- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
32184+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
32185 }
32186
32187 /* Try to fill the buffer - don't call unless there is backlog */
32188@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
32189 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
32190 __net_timestamp(skb);
32191 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
32192- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
32193+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
32194 out:
32195 lvcc->rx.buf.ptr = end;
32196 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
32197@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32198 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
32199 "vcc %d\n", lanai->number, (unsigned int) s, vci);
32200 lanai->stats.service_rxnotaal5++;
32201- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32202+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32203 return 0;
32204 }
32205 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
32206@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32207 int bytes;
32208 read_unlock(&vcc_sklist_lock);
32209 DPRINTK("got trashed rx pdu on vci %d\n", vci);
32210- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32211+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32212 lvcc->stats.x.aal5.service_trash++;
32213 bytes = (SERVICE_GET_END(s) * 16) -
32214 (((unsigned long) lvcc->rx.buf.ptr) -
32215@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32216 }
32217 if (s & SERVICE_STREAM) {
32218 read_unlock(&vcc_sklist_lock);
32219- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32220+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32221 lvcc->stats.x.aal5.service_stream++;
32222 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
32223 "PDU on VCI %d!\n", lanai->number, vci);
32224@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32225 return 0;
32226 }
32227 DPRINTK("got rx crc error on vci %d\n", vci);
32228- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32229+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32230 lvcc->stats.x.aal5.service_rxcrc++;
32231 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
32232 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
32233diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
32234index ed1d2b7..8cffc1f 100644
32235--- a/drivers/atm/nicstar.c
32236+++ b/drivers/atm/nicstar.c
32237@@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32238 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
32239 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
32240 card->index);
32241- atomic_inc(&vcc->stats->tx_err);
32242+ atomic_inc_unchecked(&vcc->stats->tx_err);
32243 dev_kfree_skb_any(skb);
32244 return -EINVAL;
32245 }
32246@@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32247 if (!vc->tx) {
32248 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
32249 card->index);
32250- atomic_inc(&vcc->stats->tx_err);
32251+ atomic_inc_unchecked(&vcc->stats->tx_err);
32252 dev_kfree_skb_any(skb);
32253 return -EINVAL;
32254 }
32255@@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32256 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
32257 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
32258 card->index);
32259- atomic_inc(&vcc->stats->tx_err);
32260+ atomic_inc_unchecked(&vcc->stats->tx_err);
32261 dev_kfree_skb_any(skb);
32262 return -EINVAL;
32263 }
32264
32265 if (skb_shinfo(skb)->nr_frags != 0) {
32266 printk("nicstar%d: No scatter-gather yet.\n", card->index);
32267- atomic_inc(&vcc->stats->tx_err);
32268+ atomic_inc_unchecked(&vcc->stats->tx_err);
32269 dev_kfree_skb_any(skb);
32270 return -EINVAL;
32271 }
32272@@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32273 }
32274
32275 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
32276- atomic_inc(&vcc->stats->tx_err);
32277+ atomic_inc_unchecked(&vcc->stats->tx_err);
32278 dev_kfree_skb_any(skb);
32279 return -EIO;
32280 }
32281- atomic_inc(&vcc->stats->tx);
32282+ atomic_inc_unchecked(&vcc->stats->tx);
32283
32284 return 0;
32285 }
32286@@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32287 printk
32288 ("nicstar%d: Can't allocate buffers for aal0.\n",
32289 card->index);
32290- atomic_add(i, &vcc->stats->rx_drop);
32291+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
32292 break;
32293 }
32294 if (!atm_charge(vcc, sb->truesize)) {
32295 RXPRINTK
32296 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
32297 card->index);
32298- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
32299+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
32300 dev_kfree_skb_any(sb);
32301 break;
32302 }
32303@@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32304 ATM_SKB(sb)->vcc = vcc;
32305 __net_timestamp(sb);
32306 vcc->push(vcc, sb);
32307- atomic_inc(&vcc->stats->rx);
32308+ atomic_inc_unchecked(&vcc->stats->rx);
32309 cell += ATM_CELL_PAYLOAD;
32310 }
32311
32312@@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32313 if (iovb == NULL) {
32314 printk("nicstar%d: Out of iovec buffers.\n",
32315 card->index);
32316- atomic_inc(&vcc->stats->rx_drop);
32317+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32318 recycle_rx_buf(card, skb);
32319 return;
32320 }
32321@@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32322 small or large buffer itself. */
32323 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
32324 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
32325- atomic_inc(&vcc->stats->rx_err);
32326+ atomic_inc_unchecked(&vcc->stats->rx_err);
32327 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32328 NS_MAX_IOVECS);
32329 NS_PRV_IOVCNT(iovb) = 0;
32330@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32331 ("nicstar%d: Expected a small buffer, and this is not one.\n",
32332 card->index);
32333 which_list(card, skb);
32334- atomic_inc(&vcc->stats->rx_err);
32335+ atomic_inc_unchecked(&vcc->stats->rx_err);
32336 recycle_rx_buf(card, skb);
32337 vc->rx_iov = NULL;
32338 recycle_iov_buf(card, iovb);
32339@@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32340 ("nicstar%d: Expected a large buffer, and this is not one.\n",
32341 card->index);
32342 which_list(card, skb);
32343- atomic_inc(&vcc->stats->rx_err);
32344+ atomic_inc_unchecked(&vcc->stats->rx_err);
32345 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32346 NS_PRV_IOVCNT(iovb));
32347 vc->rx_iov = NULL;
32348@@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32349 printk(" - PDU size mismatch.\n");
32350 else
32351 printk(".\n");
32352- atomic_inc(&vcc->stats->rx_err);
32353+ atomic_inc_unchecked(&vcc->stats->rx_err);
32354 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32355 NS_PRV_IOVCNT(iovb));
32356 vc->rx_iov = NULL;
32357@@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32358 /* skb points to a small buffer */
32359 if (!atm_charge(vcc, skb->truesize)) {
32360 push_rxbufs(card, skb);
32361- atomic_inc(&vcc->stats->rx_drop);
32362+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32363 } else {
32364 skb_put(skb, len);
32365 dequeue_sm_buf(card, skb);
32366@@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32367 ATM_SKB(skb)->vcc = vcc;
32368 __net_timestamp(skb);
32369 vcc->push(vcc, skb);
32370- atomic_inc(&vcc->stats->rx);
32371+ atomic_inc_unchecked(&vcc->stats->rx);
32372 }
32373 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
32374 struct sk_buff *sb;
32375@@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32376 if (len <= NS_SMBUFSIZE) {
32377 if (!atm_charge(vcc, sb->truesize)) {
32378 push_rxbufs(card, sb);
32379- atomic_inc(&vcc->stats->rx_drop);
32380+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32381 } else {
32382 skb_put(sb, len);
32383 dequeue_sm_buf(card, sb);
32384@@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32385 ATM_SKB(sb)->vcc = vcc;
32386 __net_timestamp(sb);
32387 vcc->push(vcc, sb);
32388- atomic_inc(&vcc->stats->rx);
32389+ atomic_inc_unchecked(&vcc->stats->rx);
32390 }
32391
32392 push_rxbufs(card, skb);
32393@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32394
32395 if (!atm_charge(vcc, skb->truesize)) {
32396 push_rxbufs(card, skb);
32397- atomic_inc(&vcc->stats->rx_drop);
32398+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32399 } else {
32400 dequeue_lg_buf(card, skb);
32401 #ifdef NS_USE_DESTRUCTORS
32402@@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32403 ATM_SKB(skb)->vcc = vcc;
32404 __net_timestamp(skb);
32405 vcc->push(vcc, skb);
32406- atomic_inc(&vcc->stats->rx);
32407+ atomic_inc_unchecked(&vcc->stats->rx);
32408 }
32409
32410 push_rxbufs(card, sb);
32411@@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32412 printk
32413 ("nicstar%d: Out of huge buffers.\n",
32414 card->index);
32415- atomic_inc(&vcc->stats->rx_drop);
32416+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32417 recycle_iovec_rx_bufs(card,
32418 (struct iovec *)
32419 iovb->data,
32420@@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32421 card->hbpool.count++;
32422 } else
32423 dev_kfree_skb_any(hb);
32424- atomic_inc(&vcc->stats->rx_drop);
32425+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32426 } else {
32427 /* Copy the small buffer to the huge buffer */
32428 sb = (struct sk_buff *)iov->iov_base;
32429@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32430 #endif /* NS_USE_DESTRUCTORS */
32431 __net_timestamp(hb);
32432 vcc->push(vcc, hb);
32433- atomic_inc(&vcc->stats->rx);
32434+ atomic_inc_unchecked(&vcc->stats->rx);
32435 }
32436 }
32437
32438diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
32439index 0474a89..06ea4a1 100644
32440--- a/drivers/atm/solos-pci.c
32441+++ b/drivers/atm/solos-pci.c
32442@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
32443 }
32444 atm_charge(vcc, skb->truesize);
32445 vcc->push(vcc, skb);
32446- atomic_inc(&vcc->stats->rx);
32447+ atomic_inc_unchecked(&vcc->stats->rx);
32448 break;
32449
32450 case PKT_STATUS:
32451@@ -1117,7 +1117,7 @@ static uint32_t fpga_tx(struct solos_card *card)
32452 vcc = SKB_CB(oldskb)->vcc;
32453
32454 if (vcc) {
32455- atomic_inc(&vcc->stats->tx);
32456+ atomic_inc_unchecked(&vcc->stats->tx);
32457 solos_pop(vcc, oldskb);
32458 } else {
32459 dev_kfree_skb_irq(oldskb);
32460diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
32461index 0215934..ce9f5b1 100644
32462--- a/drivers/atm/suni.c
32463+++ b/drivers/atm/suni.c
32464@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
32465
32466
32467 #define ADD_LIMITED(s,v) \
32468- atomic_add((v),&stats->s); \
32469- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
32470+ atomic_add_unchecked((v),&stats->s); \
32471+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
32472
32473
32474 static void suni_hz(unsigned long from_timer)
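The ADD_LIMITED macro just above keeps its existing clamp-on-wrap logic but routes it through the _unchecked primitives so the overflow plugin does not trap on the deliberate wrap-around. Its effect, sketched in plain C11 (the read-back-then-clamp is racy, which the driver tolerates for SONET statistics):

    #include <limits.h>
    #include <stdatomic.h>

    /* Model of suni.c's ADD_LIMITED(s, v): add, then clamp to INT_MAX if
     * the signed counter wrapped negative, so userspace never reads a
     * bogus negative statistic. */
    static void add_limited_model(atomic_int *s, int v)
    {
            atomic_fetch_add(s, v);
            if (atomic_load(s) < 0)
                    atomic_store(s, INT_MAX);
    }

    int main(void)
    {
            atomic_int cells = INT_MAX;

            add_limited_model(&cells, 5);  /* wraps, then clamps back */
            return atomic_load(&cells) == INT_MAX ? 0 : 1;
    }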
32475diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
32476index 5120a96..e2572bd 100644
32477--- a/drivers/atm/uPD98402.c
32478+++ b/drivers/atm/uPD98402.c
32479@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
32480 struct sonet_stats tmp;
32481 int error = 0;
32482
32483- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
32484+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
32485 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
32486 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
32487 if (zero && !error) {
32488@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
32489
32490
32491 #define ADD_LIMITED(s,v) \
32492- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
32493- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
32494- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
32495+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
32496+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
32497+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
32498
32499
32500 static void stat_event(struct atm_dev *dev)
32501@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
32502 if (reason & uPD98402_INT_PFM) stat_event(dev);
32503 if (reason & uPD98402_INT_PCO) {
32504 (void) GET(PCOCR); /* clear interrupt cause */
32505- atomic_add(GET(HECCT),
32506+ atomic_add_unchecked(GET(HECCT),
32507 &PRIV(dev)->sonet_stats.uncorr_hcs);
32508 }
32509 if ((reason & uPD98402_INT_RFO) &&
32510@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
32511 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
32512 uPD98402_INT_LOS),PIMR); /* enable them */
32513 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
32514- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
32515- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
32516- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
32517+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
32518+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
32519+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
32520 return 0;
32521 }
32522
32523diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
32524index 969c3c2..9b72956 100644
32525--- a/drivers/atm/zatm.c
32526+++ b/drivers/atm/zatm.c
32527@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
32528 }
32529 if (!size) {
32530 dev_kfree_skb_irq(skb);
32531- if (vcc) atomic_inc(&vcc->stats->rx_err);
32532+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
32533 continue;
32534 }
32535 if (!atm_charge(vcc,skb->truesize)) {
32536@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
32537 skb->len = size;
32538 ATM_SKB(skb)->vcc = vcc;
32539 vcc->push(vcc,skb);
32540- atomic_inc(&vcc->stats->rx);
32541+ atomic_inc_unchecked(&vcc->stats->rx);
32542 }
32543 zout(pos & 0xffff,MTA(mbx));
32544 #if 0 /* probably a stupid idea */
32545@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
32546 skb_queue_head(&zatm_vcc->backlog,skb);
32547 break;
32548 }
32549- atomic_inc(&vcc->stats->tx);
32550+ atomic_inc_unchecked(&vcc->stats->tx);
32551 wake_up(&zatm_vcc->tx_wait);
32552 }
32553
32554diff --git a/drivers/base/bus.c b/drivers/base/bus.c
32555index 6856303..0602d70 100644
32556--- a/drivers/base/bus.c
32557+++ b/drivers/base/bus.c
32558@@ -1163,7 +1163,7 @@ int subsys_interface_register(struct subsys_interface *sif)
32559 return -EINVAL;
32560
32561 mutex_lock(&subsys->p->mutex);
32562- list_add_tail(&sif->node, &subsys->p->interfaces);
32563+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
32564 if (sif->add_dev) {
32565 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
32566 while ((dev = subsys_dev_iter_next(&iter)))
32567@@ -1188,7 +1188,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
32568 subsys = sif->subsys;
32569
32570 mutex_lock(&subsys->p->mutex);
32571- list_del_init(&sif->node);
32572+ pax_list_del_init((struct list_head *)&sif->node);
32573 if (sif->remove_dev) {
32574 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
32575 while ((dev = subsys_dev_iter_next(&iter)))
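The bus.c change swaps the generic list helpers for pax_list_add_tail()/pax_list_del_init() because, with grsecurity's constification, subsys_interface instances can live in read-only memory; the pax_ variants briefly lift kernel write protection around the ordinary link/unlink. A self-contained model of the pattern (the open/close stand-ins are hypothetical; the real helpers toggle e.g. CR0.WP on x86):

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    /* Hypothetical stand-ins for pax_open_kernel()/pax_close_kernel(),
     * which in the real patch disable and re-enable kernel write
     * protection for a few instructions. */
    static void open_kernel_model(void)  { }
    static void close_kernel_model(void) { }

    static void __list_add_model(struct list_head *new,
                                 struct list_head *prev,
                                 struct list_head *next)
    {
            next->prev = new;
            new->next = next;
            new->prev = prev;
            prev->next = new;
    }

    /* pax_list_add_tail(): write to the otherwise read-only node only
     * inside the unprotected window. */
    static void pax_list_add_tail_model(struct list_head *new,
                                        struct list_head *head)
    {
            open_kernel_model();
            __list_add_model(new, head->prev, head);
            close_kernel_model();
    }

    int main(void)
    {
            struct list_head head = { &head, &head };
            struct list_head node;

            pax_list_add_tail_model(&node, &head);
            printf("linked: %d\n", head.prev == &node && head.next == &node);
            return 0;
    }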
32576diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
32577index 17cf7ca..7e553e1 100644
32578--- a/drivers/base/devtmpfs.c
32579+++ b/drivers/base/devtmpfs.c
32580@@ -347,7 +347,7 @@ int devtmpfs_mount(const char *mntdir)
32581 if (!thread)
32582 return 0;
32583
32584- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
32585+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
32586 if (err)
32587 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
32588 else
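The devtmpfs change is purely an annotation fix for PaX UDEREF: sys_mount() takes __user pointers, and with kernel and user address spaces made distinct for sparse, the in-kernel string arguments need an explicit __force_user cast to mark the crossing as intentional. A sketch of the annotation machinery (simplified; the kernel's real definitions live in compiler.h, and the syscall here is a stub):

    #include <stddef.h>

    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif
    #define __force_user __force __user

    /* Stub standing in for sys_mount(); only the annotations matter. */
    static long sys_mount_model(char __user *dev_name, char __user *dir_name,
                                char __user *type, unsigned long flags,
                                void __user *data)
    {
            (void)dev_name; (void)dir_name; (void)type;
            (void)flags; (void)data;
            return 0;
    }

    static int devtmpfs_mount_model(const char *mntdir)
    {
            /* kernel strings deliberately handed to a user-pointer API */
            return (int)sys_mount_model((char __force_user *)"devtmpfs",
                                        (char __force_user *)mntdir,
                                        (char __force_user *)"devtmpfs",
                                        0x8000 /* MS_SILENT */, NULL);
    }

    int main(void)
    {
            return devtmpfs_mount_model("/dev");
    }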
32589diff --git a/drivers/base/node.c b/drivers/base/node.c
32590index fac124a..66bd4ab 100644
32591--- a/drivers/base/node.c
32592+++ b/drivers/base/node.c
32593@@ -625,7 +625,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
32594 struct node_attr {
32595 struct device_attribute attr;
32596 enum node_states state;
32597-};
32598+} __do_const;
32599
32600 static ssize_t show_node_state(struct device *dev,
32601 struct device_attribute *attr, char *buf)
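Tagging struct node_attr with __do_const instructs the constify GCC plugin to treat every instance as const, parking the embedded device_attribute (and its show/store function pointers) in read-only memory. The effect, written out by hand with simplified stand-in types:

    #include <stdio.h>

    struct device_attribute_model {
            const char *name;
            int (*show)(char *buf);
    };

    struct node_attr_model {
            struct device_attribute_model attr;
            int state;
    };

    static int show_state_model(char *buf)
    {
            return sprintf(buf, "online\n");
    }

    /* With __do_const the plugin forces this const on every instance, so
     * the function pointer inside can't be overwritten at run time. */
    static const struct node_attr_model node_attrs_model[] = {
            { { "has_normal_memory", show_state_model }, 0 },
    };

    int main(void)
    {
            char buf[32];

            node_attrs_model[0].attr.show(buf);
            printf("%s", buf);
            return 0;
    }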
32602diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
32603index acc3a8d..981c236 100644
32604--- a/drivers/base/power/domain.c
32605+++ b/drivers/base/power/domain.c
32606@@ -1851,7 +1851,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
32607 {
32608 struct cpuidle_driver *cpuidle_drv;
32609 struct gpd_cpu_data *cpu_data;
32610- struct cpuidle_state *idle_state;
32611+ cpuidle_state_no_const *idle_state;
32612 int ret = 0;
32613
32614 if (IS_ERR_OR_NULL(genpd) || state < 0)
32615@@ -1919,7 +1919,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
32616 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
32617 {
32618 struct gpd_cpu_data *cpu_data;
32619- struct cpuidle_state *idle_state;
32620+ cpuidle_state_no_const *idle_state;
32621 int ret = 0;
32622
32623 if (IS_ERR_OR_NULL(genpd))
32624diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
32625index e6ee5e8..98ad7fc 100644
32626--- a/drivers/base/power/wakeup.c
32627+++ b/drivers/base/power/wakeup.c
32628@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
32629 * They need to be modified together atomically, so it's better to use one
32630 * atomic variable to hold them both.
32631 */
32632-static atomic_t combined_event_count = ATOMIC_INIT(0);
32633+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
32634
32635 #define IN_PROGRESS_BITS (sizeof(int) * 4)
32636 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
32637
32638 static void split_counters(unsigned int *cnt, unsigned int *inpr)
32639 {
32640- unsigned int comb = atomic_read(&combined_event_count);
32641+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
32642
32643 *cnt = (comb >> IN_PROGRESS_BITS);
32644 *inpr = comb & MAX_IN_PROGRESS;
32645@@ -389,7 +389,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
32646 ws->start_prevent_time = ws->last_time;
32647
32648 /* Increment the counter of events in progress. */
32649- cec = atomic_inc_return(&combined_event_count);
32650+ cec = atomic_inc_return_unchecked(&combined_event_count);
32651
32652 trace_wakeup_source_activate(ws->name, cec);
32653 }
32654@@ -515,7 +515,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
32655 * Increment the counter of registered wakeup events and decrement the
32656 * couter of wakeup events in progress simultaneously.
32657 */
32658- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
32659+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
32660 trace_wakeup_source_deactivate(ws->name, cec);
32661
32662 split_counters(&cnt, &inpr);
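For context on the wakeup counter being converted: combined_event_count packs the count of registered wakeup events into the high half of one word and the count of in-progress events into the low half, so both update with a single atomic add (deactivation adds MAX_IN_PROGRESS, i.e. +1 to the high half and -1 to the low half at once). The value is expected to wrap, hence the unchecked type. A runnable model of the packing:

    #include <stdatomic.h>
    #include <stdio.h>

    #define IN_PROGRESS_BITS (sizeof(int) * 4)      /* mirrors the kernel macro */
    #define MAX_IN_PROGRESS  ((1u << IN_PROGRESS_BITS) - 1)

    static atomic_uint combined_model;

    static void split_counters_model(unsigned *cnt, unsigned *inpr)
    {
            unsigned comb = atomic_load(&combined_model);

            *cnt  = comb >> IN_PROGRESS_BITS;
            *inpr = comb & MAX_IN_PROGRESS;
    }

    int main(void)
    {
            unsigned cnt, inpr;

            atomic_fetch_add(&combined_model, 1);           /* event starts */
            /* event finishes: adding MAX_IN_PROGRESS increments the high
             * half and decrements the low half in one atomic operation */
            atomic_fetch_add(&combined_model, MAX_IN_PROGRESS);

            split_counters_model(&cnt, &inpr);
            printf("registered=%u in_progress=%u\n", cnt, inpr); /* 1 0 */
            return 0;
    }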
32663diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
32664index e8d11b6..7b1b36f 100644
32665--- a/drivers/base/syscore.c
32666+++ b/drivers/base/syscore.c
32667@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
32668 void register_syscore_ops(struct syscore_ops *ops)
32669 {
32670 mutex_lock(&syscore_ops_lock);
32671- list_add_tail(&ops->node, &syscore_ops_list);
32672+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
32673 mutex_unlock(&syscore_ops_lock);
32674 }
32675 EXPORT_SYMBOL_GPL(register_syscore_ops);
32676@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
32677 void unregister_syscore_ops(struct syscore_ops *ops)
32678 {
32679 mutex_lock(&syscore_ops_lock);
32680- list_del(&ops->node);
32681+ pax_list_del((struct list_head *)&ops->node);
32682 mutex_unlock(&syscore_ops_lock);
32683 }
32684 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
32685diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
32686index ade58bc..867143d 100644
32687--- a/drivers/block/cciss.c
32688+++ b/drivers/block/cciss.c
32689@@ -1196,6 +1196,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
32690 int err;
32691 u32 cp;
32692
32693+ memset(&arg64, 0, sizeof(arg64));
32694+
32695 err = 0;
32696 err |=
32697 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
32698@@ -3005,7 +3007,7 @@ static void start_io(ctlr_info_t *h)
32699 while (!list_empty(&h->reqQ)) {
32700 c = list_entry(h->reqQ.next, CommandList_struct, list);
32701 /* can't do anything if fifo is full */
32702- if ((h->access.fifo_full(h))) {
32703+ if ((h->access->fifo_full(h))) {
32704 dev_warn(&h->pdev->dev, "fifo full\n");
32705 break;
32706 }
32707@@ -3015,7 +3017,7 @@ static void start_io(ctlr_info_t *h)
32708 h->Qdepth--;
32709
32710 /* Tell the controller execute command */
32711- h->access.submit_command(h, c);
32712+ h->access->submit_command(h, c);
32713
32714 /* Put job onto the completed Q */
32715 addQ(&h->cmpQ, c);
32716@@ -3441,17 +3443,17 @@ startio:
32717
32718 static inline unsigned long get_next_completion(ctlr_info_t *h)
32719 {
32720- return h->access.command_completed(h);
32721+ return h->access->command_completed(h);
32722 }
32723
32724 static inline int interrupt_pending(ctlr_info_t *h)
32725 {
32726- return h->access.intr_pending(h);
32727+ return h->access->intr_pending(h);
32728 }
32729
32730 static inline long interrupt_not_for_us(ctlr_info_t *h)
32731 {
32732- return ((h->access.intr_pending(h) == 0) ||
32733+ return ((h->access->intr_pending(h) == 0) ||
32734 (h->interrupts_enabled == 0));
32735 }
32736
32737@@ -3484,7 +3486,7 @@ static inline u32 next_command(ctlr_info_t *h)
32738 u32 a;
32739
32740 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
32741- return h->access.command_completed(h);
32742+ return h->access->command_completed(h);
32743
32744 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
32745 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
32746@@ -4041,7 +4043,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
32747 trans_support & CFGTBL_Trans_use_short_tags);
32748
32749 /* Change the access methods to the performant access methods */
32750- h->access = SA5_performant_access;
32751+ h->access = &SA5_performant_access;
32752 h->transMethod = CFGTBL_Trans_Performant;
32753
32754 return;
32755@@ -4310,7 +4312,7 @@ static int cciss_pci_init(ctlr_info_t *h)
32756 if (prod_index < 0)
32757 return -ENODEV;
32758 h->product_name = products[prod_index].product_name;
32759- h->access = *(products[prod_index].access);
32760+ h->access = products[prod_index].access;
32761
32762 if (cciss_board_disabled(h)) {
32763 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
32764@@ -5032,7 +5034,7 @@ reinit_after_soft_reset:
32765 }
32766
32767 /* make sure the board interrupts are off */
32768- h->access.set_intr_mask(h, CCISS_INTR_OFF);
32769+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
32770 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
32771 if (rc)
32772 goto clean2;
32773@@ -5082,7 +5084,7 @@ reinit_after_soft_reset:
32774 * fake ones to scoop up any residual completions.
32775 */
32776 spin_lock_irqsave(&h->lock, flags);
32777- h->access.set_intr_mask(h, CCISS_INTR_OFF);
32778+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
32779 spin_unlock_irqrestore(&h->lock, flags);
32780 free_irq(h->intr[h->intr_mode], h);
32781 rc = cciss_request_irq(h, cciss_msix_discard_completions,
32782@@ -5102,9 +5104,9 @@ reinit_after_soft_reset:
32783 dev_info(&h->pdev->dev, "Board READY.\n");
32784 dev_info(&h->pdev->dev,
32785 "Waiting for stale completions to drain.\n");
32786- h->access.set_intr_mask(h, CCISS_INTR_ON);
32787+ h->access->set_intr_mask(h, CCISS_INTR_ON);
32788 msleep(10000);
32789- h->access.set_intr_mask(h, CCISS_INTR_OFF);
32790+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
32791
32792 rc = controller_reset_failed(h->cfgtable);
32793 if (rc)
32794@@ -5127,7 +5129,7 @@ reinit_after_soft_reset:
32795 cciss_scsi_setup(h);
32796
32797 /* Turn the interrupts on so we can service requests */
32798- h->access.set_intr_mask(h, CCISS_INTR_ON);
32799+ h->access->set_intr_mask(h, CCISS_INTR_ON);
32800
32801 /* Get the firmware version */
32802 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
32803@@ -5199,7 +5201,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
32804 kfree(flush_buf);
32805 if (return_code != IO_OK)
32806 dev_warn(&h->pdev->dev, "Error flushing cache\n");
32807- h->access.set_intr_mask(h, CCISS_INTR_OFF);
32808+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
32809 free_irq(h->intr[h->intr_mode], h);
32810 }
32811
32812diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
32813index 7fda30e..eb5dfe0 100644
32814--- a/drivers/block/cciss.h
32815+++ b/drivers/block/cciss.h
32816@@ -101,7 +101,7 @@ struct ctlr_info
32817 /* information about each logical volume */
32818 drive_info_struct *drv[CISS_MAX_LUN];
32819
32820- struct access_method access;
32821+ struct access_method *access;
32822
32823 /* queue and queue Info */
32824 struct list_head reqQ;
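The cciss and cpqarray churn above all follows from one structural change, shown at the end of the header hunk: struct access_method, a table of controller function pointers that used to be copied by value into each writable ctlr_info, becomes a pointer to a single const table, so every h->access.fn(...) call site turns into h->access->fn(...). Reduced to its essentials (types simplified):

    struct access_method_model {
            void (*submit_command)(int ctlr);
            int  (*intr_pending)(int ctlr);
    };

    static void sa5_submit_model(int ctlr)  { (void)ctlr; }
    static int  sa5_pending_model(int ctlr) { (void)ctlr; return 0; }

    /* one shared, read-only table instead of a per-controller copy */
    static const struct access_method_model SA5_access_model = {
            .submit_command = sa5_submit_model,
            .intr_pending   = sa5_pending_model,
    };

    struct ctlr_info_model {
            const struct access_method_model *access; /* was: by value */
    };

    int main(void)
    {
            struct ctlr_info_model h = { .access = &SA5_access_model };

            h.access->submit_command(0);     /* was: h.access.submit_command */
            return h.access->intr_pending(0);
    }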
32825diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
32826index 3f08713..56a586a 100644
32827--- a/drivers/block/cpqarray.c
32828+++ b/drivers/block/cpqarray.c
32829@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
32830 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
32831 goto Enomem4;
32832 }
32833- hba[i]->access.set_intr_mask(hba[i], 0);
32834+ hba[i]->access->set_intr_mask(hba[i], 0);
32835 if (request_irq(hba[i]->intr, do_ida_intr,
32836 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
32837 {
32838@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
32839 add_timer(&hba[i]->timer);
32840
32841 /* Enable IRQ now that spinlock and rate limit timer are set up */
32842- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
32843+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
32844
32845 for(j=0; j<NWD; j++) {
32846 struct gendisk *disk = ida_gendisk[i][j];
32847@@ -694,7 +694,7 @@ DBGINFO(
32848 for(i=0; i<NR_PRODUCTS; i++) {
32849 if (board_id == products[i].board_id) {
32850 c->product_name = products[i].product_name;
32851- c->access = *(products[i].access);
32852+ c->access = products[i].access;
32853 break;
32854 }
32855 }
32856@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
32857 hba[ctlr]->intr = intr;
32858 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
32859 hba[ctlr]->product_name = products[j].product_name;
32860- hba[ctlr]->access = *(products[j].access);
32861+ hba[ctlr]->access = products[j].access;
32862 hba[ctlr]->ctlr = ctlr;
32863 hba[ctlr]->board_id = board_id;
32864 hba[ctlr]->pci_dev = NULL; /* not PCI */
32865@@ -980,7 +980,7 @@ static void start_io(ctlr_info_t *h)
32866
32867 while((c = h->reqQ) != NULL) {
32868 /* Can't do anything if we're busy */
32869- if (h->access.fifo_full(h) == 0)
32870+ if (h->access->fifo_full(h) == 0)
32871 return;
32872
32873 /* Get the first entry from the request Q */
32874@@ -988,7 +988,7 @@ static void start_io(ctlr_info_t *h)
32875 h->Qdepth--;
32876
32877 /* Tell the controller to do our bidding */
32878- h->access.submit_command(h, c);
32879+ h->access->submit_command(h, c);
32880
32881 /* Get onto the completion Q */
32882 addQ(&h->cmpQ, c);
32883@@ -1050,7 +1050,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
32884 unsigned long flags;
32885 __u32 a,a1;
32886
32887- istat = h->access.intr_pending(h);
32888+ istat = h->access->intr_pending(h);
32889 /* Is this interrupt for us? */
32890 if (istat == 0)
32891 return IRQ_NONE;
32892@@ -1061,7 +1061,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
32893 */
32894 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
32895 if (istat & FIFO_NOT_EMPTY) {
32896- while((a = h->access.command_completed(h))) {
32897+ while((a = h->access->command_completed(h))) {
32898 a1 = a; a &= ~3;
32899 if ((c = h->cmpQ) == NULL)
32900 {
32901@@ -1449,11 +1449,11 @@ static int sendcmd(
32902 /*
32903 * Disable interrupt
32904 */
32905- info_p->access.set_intr_mask(info_p, 0);
32906+ info_p->access->set_intr_mask(info_p, 0);
32907 /* Make sure there is room in the command FIFO */
32908 /* Actually it should be completely empty at this time. */
32909 for (i = 200000; i > 0; i--) {
32910- temp = info_p->access.fifo_full(info_p);
32911+ temp = info_p->access->fifo_full(info_p);
32912 if (temp != 0) {
32913 break;
32914 }
32915@@ -1466,7 +1466,7 @@ DBG(
32916 /*
32917 * Send the cmd
32918 */
32919- info_p->access.submit_command(info_p, c);
32920+ info_p->access->submit_command(info_p, c);
32921 complete = pollcomplete(ctlr);
32922
32923 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
32924@@ -1549,9 +1549,9 @@ static int revalidate_allvol(ctlr_info_t *host)
32925 * we check the new geometry. Then turn interrupts back on when
32926 * we're done.
32927 */
32928- host->access.set_intr_mask(host, 0);
32929+ host->access->set_intr_mask(host, 0);
32930 getgeometry(ctlr);
32931- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
32932+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
32933
32934 for(i=0; i<NWD; i++) {
32935 struct gendisk *disk = ida_gendisk[ctlr][i];
32936@@ -1591,7 +1591,7 @@ static int pollcomplete(int ctlr)
32937 /* Wait (up to 2 seconds) for a command to complete */
32938
32939 for (i = 200000; i > 0; i--) {
32940- done = hba[ctlr]->access.command_completed(hba[ctlr]);
32941+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
32942 if (done == 0) {
32943 udelay(10); /* a short fixed delay */
32944 } else
32945diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
32946index be73e9d..7fbf140 100644
32947--- a/drivers/block/cpqarray.h
32948+++ b/drivers/block/cpqarray.h
32949@@ -99,7 +99,7 @@ struct ctlr_info {
32950 drv_info_t drv[NWD];
32951 struct proc_dir_entry *proc;
32952
32953- struct access_method access;
32954+ struct access_method *access;
32955
32956 cmdlist_t *reqQ;
32957 cmdlist_t *cmpQ;
32958diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
32959index 6b51afa..17e1191 100644
32960--- a/drivers/block/drbd/drbd_int.h
32961+++ b/drivers/block/drbd/drbd_int.h
32962@@ -582,7 +582,7 @@ struct drbd_epoch {
32963 struct drbd_tconn *tconn;
32964 struct list_head list;
32965 unsigned int barrier_nr;
32966- atomic_t epoch_size; /* increased on every request added. */
32967+ atomic_unchecked_t epoch_size; /* increased on every request added. */
32968 atomic_t active; /* increased on every req. added, and dec on every finished. */
32969 unsigned long flags;
32970 };
32971@@ -1011,7 +1011,7 @@ struct drbd_conf {
32972 int al_tr_cycle;
32973 int al_tr_pos; /* position of the next transaction in the journal */
32974 wait_queue_head_t seq_wait;
32975- atomic_t packet_seq;
32976+ atomic_unchecked_t packet_seq;
32977 unsigned int peer_seq;
32978 spinlock_t peer_seq_lock;
32979 unsigned int minor;
32980@@ -1527,7 +1527,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
32981 char __user *uoptval;
32982 int err;
32983
32984- uoptval = (char __user __force *)optval;
32985+ uoptval = (char __force_user *)optval;
32986
32987 set_fs(KERNEL_DS);
32988 if (level == SOL_SOCKET)
32989diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
32990index 8c13eeb..217adee 100644
32991--- a/drivers/block/drbd/drbd_main.c
32992+++ b/drivers/block/drbd/drbd_main.c
32993@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
32994 p->sector = sector;
32995 p->block_id = block_id;
32996 p->blksize = blksize;
32997- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
32998+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
32999 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
33000 }
33001
33002@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
33003 return -EIO;
33004 p->sector = cpu_to_be64(req->i.sector);
33005 p->block_id = (unsigned long)req;
33006- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
33007+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
33008 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
33009 if (mdev->state.conn >= C_SYNC_SOURCE &&
33010 mdev->state.conn <= C_PAUSED_SYNC_T)
33011@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
33012 {
33013 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
33014
33015- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
33016- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
33017+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
33018+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
33019 kfree(tconn->current_epoch);
33020
33021 idr_destroy(&tconn->volumes);
33022diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
33023index a9eccfc..f5efe87 100644
33024--- a/drivers/block/drbd/drbd_receiver.c
33025+++ b/drivers/block/drbd/drbd_receiver.c
33026@@ -833,7 +833,7 @@ int drbd_connected(struct drbd_conf *mdev)
33027 {
33028 int err;
33029
33030- atomic_set(&mdev->packet_seq, 0);
33031+ atomic_set_unchecked(&mdev->packet_seq, 0);
33032 mdev->peer_seq = 0;
33033
33034 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
33035@@ -1191,7 +1191,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33036 do {
33037 next_epoch = NULL;
33038
33039- epoch_size = atomic_read(&epoch->epoch_size);
33040+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
33041
33042 switch (ev & ~EV_CLEANUP) {
33043 case EV_PUT:
33044@@ -1231,7 +1231,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33045 rv = FE_DESTROYED;
33046 } else {
33047 epoch->flags = 0;
33048- atomic_set(&epoch->epoch_size, 0);
33049+ atomic_set_unchecked(&epoch->epoch_size, 0);
33050 /* atomic_set(&epoch->active, 0); is already zero */
33051 if (rv == FE_STILL_LIVE)
33052 rv = FE_RECYCLED;
33053@@ -1449,7 +1449,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33054 conn_wait_active_ee_empty(tconn);
33055 drbd_flush(tconn);
33056
33057- if (atomic_read(&tconn->current_epoch->epoch_size)) {
33058+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33059 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
33060 if (epoch)
33061 break;
33062@@ -1462,11 +1462,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33063 }
33064
33065 epoch->flags = 0;
33066- atomic_set(&epoch->epoch_size, 0);
33067+ atomic_set_unchecked(&epoch->epoch_size, 0);
33068 atomic_set(&epoch->active, 0);
33069
33070 spin_lock(&tconn->epoch_lock);
33071- if (atomic_read(&tconn->current_epoch->epoch_size)) {
33072+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33073 list_add(&epoch->list, &tconn->current_epoch->list);
33074 tconn->current_epoch = epoch;
33075 tconn->epochs++;
33076@@ -2170,7 +2170,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33077
33078 err = wait_for_and_update_peer_seq(mdev, peer_seq);
33079 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
33080- atomic_inc(&tconn->current_epoch->epoch_size);
33081+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
33082 err2 = drbd_drain_block(mdev, pi->size);
33083 if (!err)
33084 err = err2;
33085@@ -2204,7 +2204,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33086
33087 spin_lock(&tconn->epoch_lock);
33088 peer_req->epoch = tconn->current_epoch;
33089- atomic_inc(&peer_req->epoch->epoch_size);
33090+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
33091 atomic_inc(&peer_req->epoch->active);
33092 spin_unlock(&tconn->epoch_lock);
33093
33094@@ -4346,7 +4346,7 @@ struct data_cmd {
33095 int expect_payload;
33096 size_t pkt_size;
33097 int (*fn)(struct drbd_tconn *, struct packet_info *);
33098-};
33099+} __do_const;
33100
33101 static struct data_cmd drbd_cmd_handler[] = {
33102 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
33103@@ -4466,7 +4466,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
33104 if (!list_empty(&tconn->current_epoch->list))
33105 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
33106 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
33107- atomic_set(&tconn->current_epoch->epoch_size, 0);
33108+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
33109 tconn->send.seen_any_write_yet = false;
33110
33111 conn_info(tconn, "Connection closed\n");
33112@@ -5222,7 +5222,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
33113 struct asender_cmd {
33114 size_t pkt_size;
33115 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
33116-};
33117+} __do_const;
33118
33119 static struct asender_cmd asender_tbl[] = {
33120 [P_PING] = { 0, got_Ping },
33121diff --git a/drivers/block/loop.c b/drivers/block/loop.c
33122index ae12512..37fa397 100644
33123--- a/drivers/block/loop.c
33124+++ b/drivers/block/loop.c
33125@@ -226,7 +226,7 @@ static int __do_lo_send_write(struct file *file,
33126 mm_segment_t old_fs = get_fs();
33127
33128 set_fs(get_ds());
33129- bw = file->f_op->write(file, buf, len, &pos);
33130+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
33131 set_fs(old_fs);
33132 if (likely(bw == len))
33133 return 0;
33134diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
33135index d620b44..587561e 100644
33136--- a/drivers/cdrom/cdrom.c
33137+++ b/drivers/cdrom/cdrom.c
33138@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
33139 ENSURE(reset, CDC_RESET);
33140 ENSURE(generic_packet, CDC_GENERIC_PACKET);
33141 cdi->mc_flags = 0;
33142- cdo->n_minors = 0;
33143 cdi->options = CDO_USE_FFLAGS;
33144
33145 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
33146@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
33147 else
33148 cdi->cdda_method = CDDA_OLD;
33149
33150- if (!cdo->generic_packet)
33151- cdo->generic_packet = cdrom_dummy_generic_packet;
33152+ if (!cdo->generic_packet) {
33153+ pax_open_kernel();
33154+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
33155+ pax_close_kernel();
33156+ }
33157
33158 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
33159 mutex_lock(&cdrom_mutex);
33160@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
33161 if (cdi->exit)
33162 cdi->exit(cdi);
33163
33164- cdi->ops->n_minors--;
33165 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
33166 }
33167
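Two related consequences of constifying struct cdrom_device_ops show up in cdrom.c: the n_minors bookkeeping writes are dropped outright (the ops struct is read-only now), and the one-time default for a missing generic_packet callback is installed through a pax_open_kernel()/pax_close_kernel() write window. A userspace model with mprotect() playing the role of the kernel's write-protection toggle (all names are illustrative stand-ins):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    struct cdrom_ops_model {
            int (*generic_packet)(void);
    };

    static int dummy_generic_packet_model(void)
    {
            return -5;      /* -EIO, as the kernel's dummy callback does */
    }

    int main(void)
    {
            long pagesz = sysconf(_SC_PAGESIZE);
            struct cdrom_ops_model *ops;

            /* a page we control stands in for .rodata */
            ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (ops == MAP_FAILED)
                    return 1;
            memset(ops, 0, sizeof(*ops));
            mprotect(ops, pagesz, PROT_READ);  /* now effectively const */

            if (!ops->generic_packet) {
                    mprotect(ops, pagesz,
                             PROT_READ | PROT_WRITE);  /* pax_open_kernel() */
                    ops->generic_packet = dummy_generic_packet_model;
                    mprotect(ops, pagesz, PROT_READ);  /* pax_close_kernel() */
            }
            printf("generic_packet() = %d\n", ops->generic_packet());
            return 0;
    }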
33168diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
33169index d59cdcb..11afddf 100644
33170--- a/drivers/cdrom/gdrom.c
33171+++ b/drivers/cdrom/gdrom.c
33172@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
33173 .audio_ioctl = gdrom_audio_ioctl,
33174 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
33175 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
33176- .n_minors = 1,
33177 };
33178
33179 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
33180diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
33181index 72bedad..8181ce1 100644
33182--- a/drivers/char/Kconfig
33183+++ b/drivers/char/Kconfig
33184@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
33185
33186 config DEVKMEM
33187 bool "/dev/kmem virtual device support"
33188- default y
33189+ default n
33190+ depends on !GRKERNSEC_KMEM
33191 help
33192 Say Y here if you want to support the /dev/kmem device. The
33193 /dev/kmem device is rarely used, but can be used for certain
33194@@ -581,6 +582,7 @@ config DEVPORT
33195 bool
33196 depends on !M68K
33197 depends on ISA || PCI
33198+ depends on !GRKERNSEC_KMEM
33199 default y
33200
33201 source "drivers/s390/char/Kconfig"
33202diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
33203index 2e04433..22afc64 100644
33204--- a/drivers/char/agp/frontend.c
33205+++ b/drivers/char/agp/frontend.c
33206@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
33207 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
33208 return -EFAULT;
33209
33210- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
33211+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
33212 return -EFAULT;
33213
33214 client = agp_find_client_by_pid(reserve.pid);
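The agp frontend fix corrects which structure bounds the user-controlled seg_count: the reservation path allocates struct agp_segment_priv objects, which are larger than the user-visible struct agp_segment, so checking against the smaller type left the later multiplication able to overflow on 32-bit. The guard pattern in isolation (struct layouts here are invented for illustration):

    #include <stdlib.h>

    struct agp_segment_model      { unsigned pg_start, pg_count, prot; };
    struct agp_segment_priv_model { struct agp_segment_model seg; void *priv; };

    static void *alloc_segments_model(unsigned seg_count)
    {
            /* bound the count by the struct actually allocated; bounding
             * by sizeof(struct agp_segment_model) would let the
             * multiplication below wrap on a 32-bit size_t */
            if (seg_count >= ~0U / sizeof(struct agp_segment_priv_model))
                    return NULL;
            return malloc(seg_count * sizeof(struct agp_segment_priv_model));
    }

    int main(void)
    {
            void *p = alloc_segments_model(4);

            free(p);
            return alloc_segments_model(~0U / 2) == NULL ? 0 : 1;
    }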
33215diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
33216index 21cb980..f15107c 100644
33217--- a/drivers/char/genrtc.c
33218+++ b/drivers/char/genrtc.c
33219@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct file *file,
33220 switch (cmd) {
33221
33222 case RTC_PLL_GET:
33223+ memset(&pll, 0, sizeof(pll));
33224 if (get_rtc_pll(&pll))
33225 return -EINVAL;
33226 else
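Both memset() additions in this region (cciss_ioctl32_passthru earlier, RTC_PLL_GET here) close the same class of bug: a stack structure that is only partially populated before being copied to userspace leaks whatever stale kernel stack bytes occupied the untouched fields and padding. The pattern, modelled in userspace with memcpy() standing in for copy_to_user():

    #include <string.h>

    struct rtc_pll_info_model {
            int  pll_ctrl;     /* suppose only this is filled on some paths */
            long pll_value;    /* padding between fields would leak too */
    };

    static int get_pll_model(struct rtc_pll_info_model *pll)
    {
            pll->pll_ctrl = 1;
            return 0;          /* note: pll_value left untouched */
    }

    static int pll_get_ioctl_model(struct rtc_pll_info_model *out)
    {
            struct rtc_pll_info_model pll;

            memset(&pll, 0, sizeof(pll)); /* the fix: no uninitialized bytes */
            if (get_pll_model(&pll))
                    return -1;
            memcpy(out, &pll, sizeof(pll)); /* stands in for copy_to_user() */
            return 0;
    }

    int main(void)
    {
            struct rtc_pll_info_model user_copy;

            return pll_get_ioctl_model(&user_copy);
    }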
33227diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
33228index fe6d4be..89f32100 100644
33229--- a/drivers/char/hpet.c
33230+++ b/drivers/char/hpet.c
33231@@ -571,7 +571,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
33232 }
33233
33234 static int
33235-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
33236+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
33237 struct hpet_info *info)
33238 {
33239 struct hpet_timer __iomem *timer;
33240diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
33241index 053201b0..8335cce 100644
33242--- a/drivers/char/ipmi/ipmi_msghandler.c
33243+++ b/drivers/char/ipmi/ipmi_msghandler.c
33244@@ -420,7 +420,7 @@ struct ipmi_smi {
33245 struct proc_dir_entry *proc_dir;
33246 char proc_dir_name[10];
33247
33248- atomic_t stats[IPMI_NUM_STATS];
33249+ atomic_unchecked_t stats[IPMI_NUM_STATS];
33250
33251 /*
33252 * run_to_completion duplicate of smb_info, smi_info
33253@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
33254
33255
33256 #define ipmi_inc_stat(intf, stat) \
33257- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
33258+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
33259 #define ipmi_get_stat(intf, stat) \
33260- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
33261+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
33262
33263 static int is_lan_addr(struct ipmi_addr *addr)
33264 {
33265@@ -2884,7 +2884,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
33266 INIT_LIST_HEAD(&intf->cmd_rcvrs);
33267 init_waitqueue_head(&intf->waitq);
33268 for (i = 0; i < IPMI_NUM_STATS; i++)
33269- atomic_set(&intf->stats[i], 0);
33270+ atomic_set_unchecked(&intf->stats[i], 0);
33271
33272 intf->proc_dir = NULL;
33273
33274diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
33275index 1c7fdcd..4899100 100644
33276--- a/drivers/char/ipmi/ipmi_si_intf.c
33277+++ b/drivers/char/ipmi/ipmi_si_intf.c
33278@@ -275,7 +275,7 @@ struct smi_info {
33279 unsigned char slave_addr;
33280
33281 /* Counters and things for the proc filesystem. */
33282- atomic_t stats[SI_NUM_STATS];
33283+ atomic_unchecked_t stats[SI_NUM_STATS];
33284
33285 struct task_struct *thread;
33286
33287@@ -284,9 +284,9 @@ struct smi_info {
33288 };
33289
33290 #define smi_inc_stat(smi, stat) \
33291- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
33292+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
33293 #define smi_get_stat(smi, stat) \
33294- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
33295+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
33296
33297 #define SI_MAX_PARMS 4
33298
33299@@ -3225,7 +3225,7 @@ static int try_smi_init(struct smi_info *new_smi)
33300 atomic_set(&new_smi->req_events, 0);
33301 new_smi->run_to_completion = 0;
33302 for (i = 0; i < SI_NUM_STATS; i++)
33303- atomic_set(&new_smi->stats[i], 0);
33304+ atomic_set_unchecked(&new_smi->stats[i], 0);
33305
33306 new_smi->interrupt_disabled = 1;
33307 atomic_set(&new_smi->stop_operation, 0);
33308diff --git a/drivers/char/mem.c b/drivers/char/mem.c
33309index c6fa3bc..4ca3e42 100644
33310--- a/drivers/char/mem.c
33311+++ b/drivers/char/mem.c
33312@@ -18,6 +18,7 @@
33313 #include <linux/raw.h>
33314 #include <linux/tty.h>
33315 #include <linux/capability.h>
33316+#include <linux/security.h>
33317 #include <linux/ptrace.h>
33318 #include <linux/device.h>
33319 #include <linux/highmem.h>
33320@@ -37,6 +38,10 @@
33321
33322 #define DEVPORT_MINOR 4
33323
33324+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
33325+extern const struct file_operations grsec_fops;
33326+#endif
33327+
33328 static inline unsigned long size_inside_page(unsigned long start,
33329 unsigned long size)
33330 {
33331@@ -68,9 +73,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33332
33333 while (cursor < to) {
33334 if (!devmem_is_allowed(pfn)) {
33335+#ifdef CONFIG_GRKERNSEC_KMEM
33336+ gr_handle_mem_readwrite(from, to);
33337+#else
33338 printk(KERN_INFO
33339 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
33340 current->comm, from, to);
33341+#endif
33342 return 0;
33343 }
33344 cursor += PAGE_SIZE;
33345@@ -78,6 +87,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33346 }
33347 return 1;
33348 }
33349+#elif defined(CONFIG_GRKERNSEC_KMEM)
33350+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33351+{
33352+ return 0;
33353+}
33354 #else
33355 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33356 {
33357@@ -120,6 +134,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
33358
33359 while (count > 0) {
33360 unsigned long remaining;
33361+ char *temp;
33362
33363 sz = size_inside_page(p, count);
33364
33365@@ -135,7 +150,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
33366 if (!ptr)
33367 return -EFAULT;
33368
33369- remaining = copy_to_user(buf, ptr, sz);
33370+#ifdef CONFIG_PAX_USERCOPY
33371+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
33372+ if (!temp) {
33373+ unxlate_dev_mem_ptr(p, ptr);
33374+ return -ENOMEM;
33375+ }
33376+ memcpy(temp, ptr, sz);
33377+#else
33378+ temp = ptr;
33379+#endif
33380+
33381+ remaining = copy_to_user(buf, temp, sz);
33382+
33383+#ifdef CONFIG_PAX_USERCOPY
33384+ kfree(temp);
33385+#endif
33386+
33387 unxlate_dev_mem_ptr(p, ptr);
33388 if (remaining)
33389 return -EFAULT;
33390@@ -398,9 +429,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33391 size_t count, loff_t *ppos)
33392 {
33393 unsigned long p = *ppos;
33394- ssize_t low_count, read, sz;
33395+ ssize_t low_count, read, sz, err = 0;
33396 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
33397- int err = 0;
33398
33399 read = 0;
33400 if (p < (unsigned long) high_memory) {
33401@@ -422,6 +452,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33402 }
33403 #endif
33404 while (low_count > 0) {
33405+ char *temp;
33406+
33407 sz = size_inside_page(p, low_count);
33408
33409 /*
33410@@ -431,7 +463,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
33411 */
33412 kbuf = xlate_dev_kmem_ptr((char *)p);
33413
33414- if (copy_to_user(buf, kbuf, sz))
33415+#ifdef CONFIG_PAX_USERCOPY
33416+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
33417+ if (!temp)
33418+ return -ENOMEM;
33419+ memcpy(temp, kbuf, sz);
33420+#else
33421+ temp = kbuf;
33422+#endif
33423+
33424+ err = copy_to_user(buf, temp, sz);
33425+
33426+#ifdef CONFIG_PAX_USERCOPY
33427+ kfree(temp);
33428+#endif
33429+
33430+ if (err)
33431 return -EFAULT;
33432 buf += sz;
33433 p += sz;
33434@@ -833,6 +880,9 @@ static const struct memdev {
33435 #ifdef CONFIG_CRASH_DUMP
33436 [12] = { "oldmem", 0, &oldmem_fops, NULL },
33437 #endif
33438+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
33439+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
33440+#endif
33441 };
33442
33443 static int memory_open(struct inode *inode, struct file *filp)
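The CONFIG_PAX_USERCOPY hunks in mem.c stage /dev/mem and /dev/kmem reads through a freshly kmalloc'd buffer: the usercopy checker validates copy_to_user() source objects against known heap and stack bounds, and a pointer produced by xlate_dev_mem_ptr() has no such backing object, so the data is first copied into an exactly-sized tracked allocation. A rough userspace analogue under that assumption (checking logic elided; names are stand-ins):

    #include <stdlib.h>
    #include <string.h>

    /* Stand-in for copy_to_user() under the usercopy checker: assume it
     * can only verify bounds of ordinary heap objects, not raw device
     * mappings. */
    static int checked_copy_out_model(char *dst, const char *src, size_t sz)
    {
            memcpy(dst, src, sz);
            return 0;
    }

    static int read_mem_model(char *ubuf, const char *devmem_ptr, size_t sz)
    {
            /* bounce buffer: kmalloc(sz, GFP_KERNEL|GFP_USERCOPY) in the
             * actual patch */
            char *temp = malloc(sz);

            if (!temp)
                    return -12;     /* -ENOMEM */
            memcpy(temp, devmem_ptr, sz);   /* now the source is a real,
                                               exactly-sized heap object */
            checked_copy_out_model(ubuf, temp, sz);
            free(temp);
            return 0;
    }

    int main(void)
    {
            char fake_devmem[32] = "device memory contents";
            char user_buf[32];

            return read_mem_model(user_buf, fake_devmem, sizeof(fake_devmem));
    }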
33444diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
33445index 9df78e2..01ba9ae 100644
33446--- a/drivers/char/nvram.c
33447+++ b/drivers/char/nvram.c
33448@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
33449
33450 spin_unlock_irq(&rtc_lock);
33451
33452- if (copy_to_user(buf, contents, tmp - contents))
33453+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
33454 return -EFAULT;
33455
33456 *ppos = i;
33457diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
33458index b66eaa0..2619d1b 100644
33459--- a/drivers/char/pcmcia/synclink_cs.c
33460+++ b/drivers/char/pcmcia/synclink_cs.c
33461@@ -2348,9 +2348,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
33462
33463 if (debug_level >= DEBUG_LEVEL_INFO)
33464 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
33465- __FILE__,__LINE__, info->device_name, port->count);
33466+ __FILE__,__LINE__, info->device_name, atomic_read(&port->count));
33467
33468- WARN_ON(!port->count);
33469+ WARN_ON(!atomic_read(&port->count));
33470
33471 if (tty_port_close_start(port, tty, filp) == 0)
33472 goto cleanup;
33473@@ -2368,7 +2368,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
33474 cleanup:
33475 if (debug_level >= DEBUG_LEVEL_INFO)
33476 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__,
33477- tty->driver->name, port->count);
33478+ tty->driver->name, atomic_read(&port->count));
33479 }
33480
33481 /* Wait until the transmitter is empty.
33482@@ -2510,7 +2510,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
33483
33484 if (debug_level >= DEBUG_LEVEL_INFO)
33485 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
33486- __FILE__,__LINE__,tty->driver->name, port->count);
33487+ __FILE__,__LINE__,tty->driver->name, atomic_read(&port->count));
33488
33489 /* If port is closing, signal caller to try again */
33490 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
33491@@ -2530,11 +2530,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
33492 goto cleanup;
33493 }
33494 spin_lock(&port->lock);
33495- port->count++;
33496+ atomic_inc(&port->count);
33497 spin_unlock(&port->lock);
33498 spin_unlock_irqrestore(&info->netlock, flags);
33499
33500- if (port->count == 1) {
33501+ if (atomic_read(&port->count) == 1) {
33502 /* 1st open on this device, init hardware */
33503 retval = startup(info, tty);
33504 if (retval < 0)
33505@@ -3889,7 +3889,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
33506 unsigned short new_crctype;
33507
33508 /* return error if TTY interface open */
33509- if (info->port.count)
33510+ if (atomic_read(&info->port.count))
33511 return -EBUSY;
33512
33513 switch (encoding)
33514@@ -3992,7 +3992,7 @@ static int hdlcdev_open(struct net_device *dev)
33515
33516 /* arbitrate between network and tty opens */
33517 spin_lock_irqsave(&info->netlock, flags);
33518- if (info->port.count != 0 || info->netcount != 0) {
33519+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
33520 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
33521 spin_unlock_irqrestore(&info->netlock, flags);
33522 return -EBUSY;
33523@@ -4081,7 +4081,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
33524 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
33525
33526 /* return error if TTY interface open */
33527- if (info->port.count)
33528+ if (atomic_read(&info->port.count))
33529 return -EBUSY;
33530
33531 if (cmd != SIOCWANDEV)
33532diff --git a/drivers/char/random.c b/drivers/char/random.c
33533index 85e81ec..a129a39 100644
33534--- a/drivers/char/random.c
33535+++ b/drivers/char/random.c
33536@@ -272,8 +272,13 @@
33537 /*
33538 * Configuration information
33539 */
33540+#ifdef CONFIG_GRKERNSEC_RANDNET
33541+#define INPUT_POOL_WORDS 512
33542+#define OUTPUT_POOL_WORDS 128
33543+#else
33544 #define INPUT_POOL_WORDS 128
33545 #define OUTPUT_POOL_WORDS 32
33546+#endif
33547 #define SEC_XFER_SIZE 512
33548 #define EXTRACT_SIZE 10
33549
33550@@ -313,10 +318,17 @@ static struct poolinfo {
33551 int poolwords;
33552 int tap1, tap2, tap3, tap4, tap5;
33553 } poolinfo_table[] = {
33554+#ifdef CONFIG_GRKERNSEC_RANDNET
33555+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
33556+ { 512, 411, 308, 208, 104, 1 },
33557+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
33558+ { 128, 103, 76, 51, 25, 1 },
33559+#else
33560 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
33561 { 128, 103, 76, 51, 25, 1 },
33562 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
33563 { 32, 26, 20, 14, 7, 1 },
33564+#endif
33565 #if 0
33566 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
33567 { 2048, 1638, 1231, 819, 411, 1 },
33568@@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
33569 input_rotate += i ? 7 : 14;
33570 }
33571
33572- ACCESS_ONCE(r->input_rotate) = input_rotate;
33573- ACCESS_ONCE(r->add_ptr) = i;
33574+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
33575+ ACCESS_ONCE_RW(r->add_ptr) = i;
33576 smp_wmb();
33577
33578 if (out)
33579@@ -1020,7 +1032,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
33580
33581 extract_buf(r, tmp);
33582 i = min_t(int, nbytes, EXTRACT_SIZE);
33583- if (copy_to_user(buf, tmp, i)) {
33584+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
33585 ret = -EFAULT;
33586 break;
33587 }
33588@@ -1356,7 +1368,7 @@ EXPORT_SYMBOL(generate_random_uuid);
33589 #include <linux/sysctl.h>
33590
33591 static int min_read_thresh = 8, min_write_thresh;
33592-static int max_read_thresh = INPUT_POOL_WORDS * 32;
33593+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
33594 static int max_write_thresh = INPUT_POOL_WORDS * 32;
33595 static char sysctl_bootid[16];
33596
33597@@ -1372,7 +1384,7 @@ static char sysctl_bootid[16];
33598 static int proc_do_uuid(ctl_table *table, int write,
33599 void __user *buffer, size_t *lenp, loff_t *ppos)
33600 {
33601- ctl_table fake_table;
33602+ ctl_table_no_const fake_table;
33603 unsigned char buf[64], tmp_uuid[16], *uuid;
33604
33605 uuid = table->data;
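Two independent things happen in random.c: GRKERNSEC_RANDNET quadruples the input and output pool sizes (with matching primitive polynomials for the larger LFSRs), and stores to fields accessed via ACCESS_ONCE() switch to ACCESS_ONCE_RW(), because PaX const-qualifies the plain macro so it can only read. The read/write split, modelled with illustrative definitions:

    /* Model of the PaX ACCESS_ONCE split: the plain macro gains a const
     * qualifier, so assigning through it fails to compile; stores go
     * through the RW variant instead. */
    #define ACCESS_ONCE_MODEL(x)    (*(const volatile __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW_MODEL(x) (*(volatile __typeof__(x) *)&(x))

    static int input_rotate_model;

    int main(void)
    {
            ACCESS_ONCE_RW_MODEL(input_rotate_model) = 7;     /* store: RW */
            return ACCESS_ONCE_MODEL(input_rotate_model) - 7; /* load: const */
    }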
33606diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
33607index d780295..b29f3a8 100644
33608--- a/drivers/char/sonypi.c
33609+++ b/drivers/char/sonypi.c
33610@@ -54,6 +54,7 @@
33611
33612 #include <asm/uaccess.h>
33613 #include <asm/io.h>
33614+#include <asm/local.h>
33615
33616 #include <linux/sonypi.h>
33617
33618@@ -490,7 +491,7 @@ static struct sonypi_device {
33619 spinlock_t fifo_lock;
33620 wait_queue_head_t fifo_proc_list;
33621 struct fasync_struct *fifo_async;
33622- int open_count;
33623+ local_t open_count;
33624 int model;
33625 struct input_dev *input_jog_dev;
33626 struct input_dev *input_key_dev;
33627@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
33628 static int sonypi_misc_release(struct inode *inode, struct file *file)
33629 {
33630 mutex_lock(&sonypi_device.lock);
33631- sonypi_device.open_count--;
33632+ local_dec(&sonypi_device.open_count);
33633 mutex_unlock(&sonypi_device.lock);
33634 return 0;
33635 }
33636@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
33637 {
33638 mutex_lock(&sonypi_device.lock);
33639 /* Flush input queue on first open */
33640- if (!sonypi_device.open_count)
33641+ if (!local_read(&sonypi_device.open_count))
33642 kfifo_reset(&sonypi_device.fifo);
33643- sonypi_device.open_count++;
33644+ local_inc(&sonypi_device.open_count);
33645 mutex_unlock(&sonypi_device.lock);
33646
33647 return 0;
33648diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
33649index 93211df..c7805f7 100644
33650--- a/drivers/char/tpm/tpm.c
33651+++ b/drivers/char/tpm/tpm.c
33652@@ -410,7 +410,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
33653 chip->vendor.req_complete_val)
33654 goto out_recv;
33655
33656- if ((status == chip->vendor.req_canceled)) {
33657+ if (status == chip->vendor.req_canceled) {
33658 dev_err(chip->dev, "Operation Canceled\n");
33659 rc = -ECANCELED;
33660 goto out;
33661diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
33662index 56051d0..11cf3b7 100644
33663--- a/drivers/char/tpm/tpm_acpi.c
33664+++ b/drivers/char/tpm/tpm_acpi.c
33665@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
33666 virt = acpi_os_map_memory(start, len);
33667 if (!virt) {
33668 kfree(log->bios_event_log);
33669+ log->bios_event_log = NULL;
33670 printk("%s: ERROR - Unable to map memory\n", __func__);
33671 return -EIO;
33672 }
33673
33674- memcpy_fromio(log->bios_event_log, virt, len);
33675+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
33676
33677 acpi_os_unmap_memory(virt, len);
33678 return 0;
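
The one-line NULL assignment above prevents a later double free or
use-after-free of the cached log buffer once the mapping fails. A minimal
model, with plain free() standing in for kfree() (both treat NULL as a
no-op):

#include <stdio.h>
#include <stdlib.h>

struct bios_log {
        char *bios_event_log;
};

static int load_log(struct bios_log *log, int fail_map)
{
        log->bios_event_log = malloc(64);
        if (!log->bios_event_log)
                return -1;
        if (fail_map) {                 /* models acpi_os_map_memory() failing */
                free(log->bios_event_log);
                log->bios_event_log = NULL;     /* the one-line fix */
                return -1;
        }
        return 0;
}

int main(void)
{
        struct bios_log log = { 0 };

        load_log(&log, 1);
        free(log.bios_event_log);       /* safe: free(NULL) is a no-op */
        puts("no double free");
        return 0;
}
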
33679diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
33680index 84ddc55..1d32f1e 100644
33681--- a/drivers/char/tpm/tpm_eventlog.c
33682+++ b/drivers/char/tpm/tpm_eventlog.c
33683@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
33684 event = addr;
33685
33686 if ((event->event_type == 0 && event->event_size == 0) ||
33687- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
33688+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
33689 return NULL;
33690
33691 return addr;
33692@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
33693 return NULL;
33694
33695 if ((event->event_type == 0 && event->event_size == 0) ||
33696- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
33697+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
33698 return NULL;
33699
33700 (*pos)++;
33701@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
33702 int i;
33703
33704 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
33705- seq_putc(m, data[i]);
33706+ if (!seq_putc(m, data[i]))
33707+ return -EFAULT;
33708
33709 return 0;
33710 }
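
The two tpm_eventlog.c bounds checks above are rewritten so that the
attacker-influenced event_size never appears on the pointer-arithmetic side:
the old form, addr + sizeof(hdr) + event_size >= limit, can wrap, while the
new form keeps all arithmetic on trusted values. A model (HDR_SIZE stands in
for sizeof(struct tcpa_event), and the sketch assumes the caller already
established addr + HDR_SIZE <= limit):

#include <stdint.h>
#include <stdio.h>

#define HDR_SIZE 32u

static int event_fits(uintptr_t addr, uintptr_t limit, uint32_t event_size)
{
        /* old, wrappable check was: addr + HDR_SIZE + event_size >= limit */
        return !(event_size >= limit - addr - HDR_SIZE);
}

int main(void)
{
        uintptr_t base = 0x1000, limit = 0x2000;

        printf("small event fits: %d\n", event_fits(base, limit, 16));
        printf("huge event fits:  %d\n", event_fits(base, limit, UINT32_MAX));
        return 0;
}
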
33711diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
33712index ee4dbea..69c817b 100644
33713--- a/drivers/char/virtio_console.c
33714+++ b/drivers/char/virtio_console.c
33715@@ -681,7 +681,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
33716 if (to_user) {
33717 ssize_t ret;
33718
33719- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
33720+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
33721 if (ret)
33722 return -EFAULT;
33723 } else {
33724@@ -780,7 +780,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
33725 if (!port_has_data(port) && !port->host_connected)
33726 return 0;
33727
33728- return fill_readbuf(port, ubuf, count, true);
33729+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
33730 }
33731
33732 static int wait_port_writable(struct port *port, bool nonblock)
33733diff --git a/drivers/clocksource/arm_generic.c b/drivers/clocksource/arm_generic.c
33734index 8ae1a61..9c00613 100644
33735--- a/drivers/clocksource/arm_generic.c
33736+++ b/drivers/clocksource/arm_generic.c
33737@@ -181,7 +181,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
33738 return NOTIFY_OK;
33739 }
33740
33741-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
33742+static struct notifier_block arch_timer_cpu_nb = {
33743 .notifier_call = arch_timer_cpu_notify,
33744 };
33745
33746diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
33747index fce2000..1110478 100644
33748--- a/drivers/connector/cn_proc.c
33749+++ b/drivers/connector/cn_proc.c
33750@@ -313,6 +313,12 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
33751 (task_active_pid_ns(current) != &init_pid_ns))
33752 return;
33753
33754+ /* Can only change if privileged. */
33755+ if (!capable(CAP_NET_ADMIN)) {
33756+ err = EPERM;
33757+ goto out;
33758+ }
33759+
33760 mc_op = (enum proc_cn_mcast_op *)msg->data;
33761 switch (*mc_op) {
33762 case PROC_CN_MCAST_LISTEN:
33763@@ -325,6 +331,8 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
33764 err = EINVAL;
33765 break;
33766 }
33767+
33768+out:
33769 cn_proc_ack(err, msg->seq, msg->ack);
33770 }
33771
33772diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
33773index 7b0d49d..134fac9 100644
33774--- a/drivers/cpufreq/acpi-cpufreq.c
33775+++ b/drivers/cpufreq/acpi-cpufreq.c
33776@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
33777 return sprintf(buf, "%u\n", boost_enabled);
33778 }
33779
33780-static struct global_attr global_boost = __ATTR(boost, 0644,
33781+static global_attr_no_const global_boost = __ATTR(boost, 0644,
33782 show_global_boost,
33783 store_global_boost);
33784
33785@@ -712,8 +712,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
33786 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
33787 per_cpu(acfreq_data, cpu) = data;
33788
33789- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
33790- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
33791+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
33792+ pax_open_kernel();
33793+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
33794+ pax_close_kernel();
33795+ }
33796
33797 result = acpi_processor_register_performance(data->acpi_data, cpu);
33798 if (result)
33799@@ -835,7 +838,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
33800 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
33801 break;
33802 case ACPI_ADR_SPACE_FIXED_HARDWARE:
33803- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
33804+ pax_open_kernel();
33805+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
33806+ pax_close_kernel();
33807 policy->cur = get_cur_freq_on_cpu(cpu);
33808 break;
33809 default:
33810@@ -846,8 +851,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
33811 acpi_processor_notify_smm(THIS_MODULE);
33812
33813 /* Check for APERF/MPERF support in hardware */
33814- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
33815- acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
33816+ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
33817+ pax_open_kernel();
33818+ *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
33819+ pax_close_kernel();
33820+ }
33821
33822 pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
33823 for (i = 0; i < perf->state_count; i++)
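
pax_open_kernel()/pax_close_kernel() are PaX helpers, not upstream APIs:
under KERNEXEC/constify the driver ops structures live in read-only memory,
so the one sanctioned boot-time write is bracketed by the helpers, with the
*(u8 *)& and *(void **)& casts stripping the implied const. A userland
analogy using mprotect(), offered as a sketch of the idea rather than the
kernel mechanism:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct driver {
        unsigned char flags;
        char name[16];
};

int main(void)
{
        long pg = sysconf(_SC_PAGESIZE);
        struct driver *drv = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                                  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (drv == MAP_FAILED)
                return 1;
        strcpy(drv->name, "acpi-cpufreq");
        mprotect(drv, pg, PROT_READ);           /* "const" from here on */

        mprotect(drv, pg, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
        drv->flags |= 0x04;                     /* CPUFREQ_CONST_LOOPS-style bit */
        mprotect(drv, pg, PROT_READ);           /* pax_close_kernel() */

        printf("%s flags=%#x\n", drv->name, drv->flags);
        return 0;
}

The cpufreq.c, p4-clockmod.c, speedstep-centrino.c and memconsole.c hunks
below repeat the same bracketing pattern.
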
33824diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
33825index 1f93dbd..305cef1 100644
33826--- a/drivers/cpufreq/cpufreq.c
33827+++ b/drivers/cpufreq/cpufreq.c
33828@@ -1843,7 +1843,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
33829 return NOTIFY_OK;
33830 }
33831
33832-static struct notifier_block __refdata cpufreq_cpu_notifier = {
33833+static struct notifier_block cpufreq_cpu_notifier = {
33834 .notifier_call = cpufreq_cpu_callback,
33835 };
33836
33837@@ -1875,8 +1875,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
33838
33839 pr_debug("trying to register driver %s\n", driver_data->name);
33840
33841- if (driver_data->setpolicy)
33842- driver_data->flags |= CPUFREQ_CONST_LOOPS;
33843+ if (driver_data->setpolicy) {
33844+ pax_open_kernel();
33845+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
33846+ pax_close_kernel();
33847+ }
33848
33849 spin_lock_irqsave(&cpufreq_driver_lock, flags);
33850 if (cpufreq_driver) {
33851diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
33852index 6c5f1d3..c7e2f35e 100644
33853--- a/drivers/cpufreq/cpufreq_governor.c
33854+++ b/drivers/cpufreq/cpufreq_governor.c
33855@@ -243,7 +243,7 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
33856 * governor, thus we are bound to jiffes/HZ
33857 */
33858 if (dbs_data->governor == GOV_CONSERVATIVE) {
33859- struct cs_ops *ops = dbs_data->gov_ops;
33860+ const struct cs_ops *ops = dbs_data->gov_ops;
33861
33862 cpufreq_register_notifier(ops->notifier_block,
33863 CPUFREQ_TRANSITION_NOTIFIER);
33864@@ -251,7 +251,7 @@ int cpufreq_governor_dbs(struct dbs_data *dbs_data,
33865 dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
33866 jiffies_to_usecs(10);
33867 } else {
33868- struct od_ops *ops = dbs_data->gov_ops;
33869+ const struct od_ops *ops = dbs_data->gov_ops;
33870
33871 od_tuners->io_is_busy = ops->io_busy();
33872 }
33873@@ -268,7 +268,7 @@ second_time:
33874 cs_dbs_info->enable = 1;
33875 cs_dbs_info->requested_freq = policy->cur;
33876 } else {
33877- struct od_ops *ops = dbs_data->gov_ops;
33878+ const struct od_ops *ops = dbs_data->gov_ops;
33879 od_dbs_info->rate_mult = 1;
33880 od_dbs_info->sample_type = OD_NORMAL_SAMPLE;
33881 ops->powersave_bias_init_cpu(cpu);
33882@@ -289,7 +289,7 @@ second_time:
33883 mutex_destroy(&cpu_cdbs->timer_mutex);
33884 dbs_data->enable--;
33885 if (!dbs_data->enable) {
33886- struct cs_ops *ops = dbs_data->gov_ops;
33887+ const struct cs_ops *ops = dbs_data->gov_ops;
33888
33889 sysfs_remove_group(cpufreq_global_kobject,
33890 dbs_data->attr_group);
33891diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
33892index f661654..6c8e638 100644
33893--- a/drivers/cpufreq/cpufreq_governor.h
33894+++ b/drivers/cpufreq/cpufreq_governor.h
33895@@ -142,7 +142,7 @@ struct dbs_data {
33896 void (*gov_check_cpu)(int cpu, unsigned int load);
33897
33898 /* Governor specific ops, see below */
33899- void *gov_ops;
33900+ const void *gov_ops;
33901 };
33902
33903 /* Governor specific ops, will be passed to dbs_data->gov_ops */
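
Sketch of the gov_ops constification above: once dbs_data->gov_ops is
"const void *", every consumer is forced into a "const struct ops *" view,
so an accidental write through the pointer becomes a compile error instead
of a runtime corruption of a shared ops table.

#include <stdio.h>

struct od_ops {
        unsigned int (*io_busy)(void);
};

struct dbs_data {
        const void *gov_ops;    /* was: void *gov_ops */
};

static unsigned int always_busy(void) { return 1; }

int main(void)
{
        static const struct od_ops od = { .io_busy = always_busy };
        struct dbs_data dbs = { .gov_ops = &od };
        const struct od_ops *ops = dbs.gov_ops;

        /* ops->io_busy = NULL;  <- would no longer compile */
        printf("io_busy = %u\n", ops->io_busy());
        return 0;
}
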
33904diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
33905index 9d7732b..0b1a793 100644
33906--- a/drivers/cpufreq/cpufreq_stats.c
33907+++ b/drivers/cpufreq/cpufreq_stats.c
33908@@ -340,7 +340,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
33909 }
33910
33911 /* priority=1 so this will get called before cpufreq_remove_dev */
33912-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
33913+static struct notifier_block cpufreq_stat_cpu_notifier = {
33914 .notifier_call = cpufreq_stat_cpu_callback,
33915 .priority = 1,
33916 };
33917diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
33918index 827629c9..0bc6a03 100644
33919--- a/drivers/cpufreq/p4-clockmod.c
33920+++ b/drivers/cpufreq/p4-clockmod.c
33921@@ -167,10 +167,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
33922 case 0x0F: /* Core Duo */
33923 case 0x16: /* Celeron Core */
33924 case 0x1C: /* Atom */
33925- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
33926+ pax_open_kernel();
33927+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
33928+ pax_close_kernel();
33929 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
33930 case 0x0D: /* Pentium M (Dothan) */
33931- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
33932+ pax_open_kernel();
33933+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
33934+ pax_close_kernel();
33935 /* fall through */
33936 case 0x09: /* Pentium M (Banias) */
33937 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
33938@@ -182,7 +186,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
33939
33940 /* on P-4s, the TSC runs with constant frequency independent whether
33941 * throttling is active or not. */
33942- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
33943+ pax_open_kernel();
33944+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
33945+ pax_close_kernel();
33946
33947 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
33948 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
33949diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
33950index 3a953d5..f5993f6 100644
33951--- a/drivers/cpufreq/speedstep-centrino.c
33952+++ b/drivers/cpufreq/speedstep-centrino.c
33953@@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
33954 !cpu_has(cpu, X86_FEATURE_EST))
33955 return -ENODEV;
33956
33957- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
33958- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
33959+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
33960+ pax_open_kernel();
33961+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
33962+ pax_close_kernel();
33963+ }
33964
33965 if (policy->cpu != 0)
33966 return -ENODEV;
33967diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
33968index e1f6860..f8de20b 100644
33969--- a/drivers/cpuidle/cpuidle.c
33970+++ b/drivers/cpuidle/cpuidle.c
33971@@ -279,7 +279,7 @@ static int poll_idle(struct cpuidle_device *dev,
33972
33973 static void poll_idle_init(struct cpuidle_driver *drv)
33974 {
33975- struct cpuidle_state *state = &drv->states[0];
33976+ cpuidle_state_no_const *state = &drv->states[0];
33977
33978 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
33979 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
33980diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
33981index ea2f8e7..70ac501 100644
33982--- a/drivers/cpuidle/governor.c
33983+++ b/drivers/cpuidle/governor.c
33984@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
33985 mutex_lock(&cpuidle_lock);
33986 if (__cpuidle_find_governor(gov->name) == NULL) {
33987 ret = 0;
33988- list_add_tail(&gov->governor_list, &cpuidle_governors);
33989+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
33990 if (!cpuidle_curr_governor ||
33991 cpuidle_curr_governor->rating < gov->rating)
33992 cpuidle_switch_governor(gov);
33993@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
33994 new_gov = cpuidle_replace_governor(gov->rating);
33995 cpuidle_switch_governor(new_gov);
33996 }
33997- list_del(&gov->governor_list);
33998+ pax_list_del((struct list_head *)&gov->governor_list);
33999 mutex_unlock(&cpuidle_lock);
34000 }
34001
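
pax_list_add_tail()/pax_list_del() are grsecurity-provided list helpers able
to link nodes that live in read-only data (hence the explicit cast off the
now-constified governor), presumably by bracketing the stores with the
open/close helpers shown earlier. A plain-C model of what the insert has to
write: the new node's two pointers plus one pointer in each neighbour.

#include <stdio.h>

struct list_head {
        struct list_head *next, *prev;
};

static void list_add_tail_model(struct list_head *new, struct list_head *head)
{
        new->prev = head->prev;         /* writes into "new" itself ... */
        new->next = head;
        head->prev->next = new;         /* ... and into its neighbours */
        head->prev = new;
}

int main(void)
{
        struct list_head head = { &head, &head };
        struct list_head gov_node;

        list_add_tail_model(&gov_node, &head);
        printf("linked: %d\n", head.prev == &gov_node && head.next == &gov_node);
        return 0;
}

The devfreq.c hunks below apply the same helpers to the devfreq governor
list.
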
34002diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
34003index 428754a..8bdf9cc 100644
34004--- a/drivers/cpuidle/sysfs.c
34005+++ b/drivers/cpuidle/sysfs.c
34006@@ -131,7 +131,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
34007 NULL
34008 };
34009
34010-static struct attribute_group cpuidle_attr_group = {
34011+static attribute_group_no_const cpuidle_attr_group = {
34012 .attrs = cpuidle_default_attrs,
34013 .name = "cpuidle",
34014 };
34015diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
34016index 3b36797..289c16a 100644
34017--- a/drivers/devfreq/devfreq.c
34018+++ b/drivers/devfreq/devfreq.c
34019@@ -588,7 +588,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
34020 goto err_out;
34021 }
34022
34023- list_add(&governor->node, &devfreq_governor_list);
34024+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
34025
34026 list_for_each_entry(devfreq, &devfreq_list, node) {
34027 int ret = 0;
34028@@ -676,7 +676,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
34029 }
34030 }
34031
34032- list_del(&governor->node);
34033+ pax_list_del((struct list_head *)&governor->node);
34034 err_out:
34035 mutex_unlock(&devfreq_list_lock);
34036
34037diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
34038index b70709b..1d8d02a 100644
34039--- a/drivers/dma/sh/shdma.c
34040+++ b/drivers/dma/sh/shdma.c
34041@@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
34042 return ret;
34043 }
34044
34045-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
34046+static struct notifier_block sh_dmae_nmi_notifier = {
34047 .notifier_call = sh_dmae_nmi_handler,
34048
34049 /* Run before NMI debug handler and KGDB */
34050diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
34051index 0ca1ca7..6e6f454 100644
34052--- a/drivers/edac/edac_mc_sysfs.c
34053+++ b/drivers/edac/edac_mc_sysfs.c
34054@@ -148,7 +148,7 @@ static const char *edac_caps[] = {
34055 struct dev_ch_attribute {
34056 struct device_attribute attr;
34057 int channel;
34058-};
34059+} __do_const;
34060
34061 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
34062 struct dev_ch_attribute dev_attr_legacy_##_name = \
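
__do_const is a grsecurity constify-plugin attribute: wrapper structs that
carry little beyond function pointers get their instances forced into
read-only memory (__no_const, seen in mce_amd.h and the nouveau headers
below, is the opt-out for ops that must stay writable). A plain-C
approximation simply declares the instances const:

#include <stdio.h>

struct dev_ch_attribute {
        const char *name;
        int channel;
};

static const struct dev_ch_attribute legacy_attrs[] = { /* "as if" __do_const */
        { "ce_count", 0 },
        { "ue_count", 1 },
};

int main(void)
{
        /* legacy_attrs[0].channel = 7;  <- write would not compile */
        printf("%s on channel %d\n", legacy_attrs[0].name,
               legacy_attrs[0].channel);
        return 0;
}
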
34063diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
34064index 0056c4d..23b54d9 100644
34065--- a/drivers/edac/edac_pci_sysfs.c
34066+++ b/drivers/edac/edac_pci_sysfs.c
34067@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
34068 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
34069 static int edac_pci_poll_msec = 1000; /* one second workq period */
34070
34071-static atomic_t pci_parity_count = ATOMIC_INIT(0);
34072-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
34073+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
34074+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
34075
34076 static struct kobject *edac_pci_top_main_kobj;
34077 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
34078@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
34079 void *value;
34080 ssize_t(*show) (void *, char *);
34081 ssize_t(*store) (void *, const char *, size_t);
34082-};
34083+} __do_const;
34084
34085 /* Set of show/store abstract level functions for PCI Parity object */
34086 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
34087@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34088 edac_printk(KERN_CRIT, EDAC_PCI,
34089 "Signaled System Error on %s\n",
34090 pci_name(dev));
34091- atomic_inc(&pci_nonparity_count);
34092+ atomic_inc_unchecked(&pci_nonparity_count);
34093 }
34094
34095 if (status & (PCI_STATUS_PARITY)) {
34096@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34097 "Master Data Parity Error on %s\n",
34098 pci_name(dev));
34099
34100- atomic_inc(&pci_parity_count);
34101+ atomic_inc_unchecked(&pci_parity_count);
34102 }
34103
34104 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34105@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34106 "Detected Parity Error on %s\n",
34107 pci_name(dev));
34108
34109- atomic_inc(&pci_parity_count);
34110+ atomic_inc_unchecked(&pci_parity_count);
34111 }
34112 }
34113
34114@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34115 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
34116 "Signaled System Error on %s\n",
34117 pci_name(dev));
34118- atomic_inc(&pci_nonparity_count);
34119+ atomic_inc_unchecked(&pci_nonparity_count);
34120 }
34121
34122 if (status & (PCI_STATUS_PARITY)) {
34123@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34124 "Master Data Parity Error on "
34125 "%s\n", pci_name(dev));
34126
34127- atomic_inc(&pci_parity_count);
34128+ atomic_inc_unchecked(&pci_parity_count);
34129 }
34130
34131 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34132@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34133 "Detected Parity Error on %s\n",
34134 pci_name(dev));
34135
34136- atomic_inc(&pci_parity_count);
34137+ atomic_inc_unchecked(&pci_parity_count);
34138 }
34139 }
34140 }
34141@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
34142 if (!check_pci_errors)
34143 return;
34144
34145- before_count = atomic_read(&pci_parity_count);
34146+ before_count = atomic_read_unchecked(&pci_parity_count);
34147
34148 /* scan all PCI devices looking for a Parity Error on devices and
34149 * bridges.
34150@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
34151 /* Only if operator has selected panic on PCI Error */
34152 if (edac_pci_get_panic_on_pe()) {
34153 /* If the count is different 'after' from 'before' */
34154- if (before_count != atomic_read(&pci_parity_count))
34155+ if (before_count != atomic_read_unchecked(&pci_parity_count))
34156 panic("EDAC: PCI Parity Error");
34157 }
34158 }
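
atomic_unchecked_t is the patch's escape hatch from PaX REFCOUNT: plain
statistics such as pci_parity_count may legitimately wrap, so they use the
unchecked ops rather than the overflow-trapping atomic_t. C11 unsigned
atomics model the "wrap is fine" semantics:

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_uint parity_count = UINT_MAX;     /* about to wrap */

int main(void)
{
        unsigned int before = atomic_load(&parity_count);

        atomic_fetch_add(&parity_count, 1);     /* wraps to 0, no trap */
        if (before != atomic_load(&parity_count))
                puts("counter moved: would panic if panic_on_pe were set");
        return 0;
}

The same atomic_t to atomic_unchecked_t conversion recurs throughout the
gpio, drm, i810, i915, mga, nouveau and r128 hunks below, always on pure
event counters.
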
34159diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
34160index 6796799..99e8377 100644
34161--- a/drivers/edac/mce_amd.h
34162+++ b/drivers/edac/mce_amd.h
34163@@ -78,7 +78,7 @@ extern const char * const ii_msgs[];
34164 struct amd_decoder_ops {
34165 bool (*mc0_mce)(u16, u8);
34166 bool (*mc1_mce)(u16, u8);
34167-};
34168+} __no_const;
34169
34170 void amd_report_gart_errors(bool);
34171 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
34172diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
34173index 57ea7f4..789e3c3 100644
34174--- a/drivers/firewire/core-card.c
34175+++ b/drivers/firewire/core-card.c
34176@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
34177
34178 void fw_core_remove_card(struct fw_card *card)
34179 {
34180- struct fw_card_driver dummy_driver = dummy_driver_template;
34181+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
34182
34183 card->driver->update_phy_reg(card, 4,
34184 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
34185diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
34186index f8d2287..5aaf4db 100644
34187--- a/drivers/firewire/core-cdev.c
34188+++ b/drivers/firewire/core-cdev.c
34189@@ -1365,8 +1365,7 @@ static int init_iso_resource(struct client *client,
34190 int ret;
34191
34192 if ((request->channels == 0 && request->bandwidth == 0) ||
34193- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
34194- request->bandwidth < 0)
34195+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
34196 return -EINVAL;
34197
34198 r = kmalloc(sizeof(*r), GFP_KERNEL);
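
The dropped "request->bandwidth < 0" test above was, presumably, dead code:
bandwidth is a __u32 in the uapi struct, and an unsigned value is never
negative, which compilers flag with -Wtype-limits. A minimal demonstration
of the pitfall:

#include <stdio.h>

struct iso_request {
        unsigned int bandwidth;         /* models the __u32 field */
};

int main(void)
{
        struct iso_request req = { .bandwidth = (unsigned int)-1 };

        if (req.bandwidth < 0)          /* always false for unsigned */
                puts("never printed");
        printf("bandwidth = %u (huge, but caught by the upper bound)\n",
               req.bandwidth);
        return 0;
}
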
34199diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
34200index af3e8aa..eb2f227 100644
34201--- a/drivers/firewire/core-device.c
34202+++ b/drivers/firewire/core-device.c
34203@@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
34204 struct config_rom_attribute {
34205 struct device_attribute attr;
34206 u32 key;
34207-};
34208+} __do_const;
34209
34210 static ssize_t show_immediate(struct device *dev,
34211 struct device_attribute *dattr, char *buf)
34212diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
34213index 28a94c7..58da63a 100644
34214--- a/drivers/firewire/core-transaction.c
34215+++ b/drivers/firewire/core-transaction.c
34216@@ -38,6 +38,7 @@
34217 #include <linux/timer.h>
34218 #include <linux/types.h>
34219 #include <linux/workqueue.h>
34220+#include <linux/sched.h>
34221
34222 #include <asm/byteorder.h>
34223
34224diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
34225index 515a42c..5ecf3ba 100644
34226--- a/drivers/firewire/core.h
34227+++ b/drivers/firewire/core.h
34228@@ -111,6 +111,7 @@ struct fw_card_driver {
34229
34230 int (*stop_iso)(struct fw_iso_context *ctx);
34231 };
34232+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
34233
34234 void fw_card_initialize(struct fw_card *card,
34235 const struct fw_card_driver *driver, struct device *device);
34236diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
34237index 94a58a0..f5eba42 100644
34238--- a/drivers/firmware/dmi-id.c
34239+++ b/drivers/firmware/dmi-id.c
34240@@ -16,7 +16,7 @@
34241 struct dmi_device_attribute{
34242 struct device_attribute dev_attr;
34243 int field;
34244-};
34245+} __do_const;
34246 #define to_dmi_dev_attr(_dev_attr) \
34247 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
34248
34249diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
34250index 982f1f5..d21e5da 100644
34251--- a/drivers/firmware/dmi_scan.c
34252+++ b/drivers/firmware/dmi_scan.c
34253@@ -491,11 +491,6 @@ void __init dmi_scan_machine(void)
34254 }
34255 }
34256 else {
34257- /*
34258- * no iounmap() for that ioremap(); it would be a no-op, but
34259- * it's so early in setup that sucker gets confused into doing
34260- * what it shouldn't if we actually call it.
34261- */
34262 p = dmi_ioremap(0xF0000, 0x10000);
34263 if (p == NULL)
34264 goto error;
34265@@ -770,7 +765,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
34266 if (buf == NULL)
34267 return -1;
34268
34269- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
34270+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
34271
34272 iounmap(buf);
34273 return 0;
34274diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
34275index bcb201c..4fd34dd 100644
34276--- a/drivers/firmware/efivars.c
34277+++ b/drivers/firmware/efivars.c
34278@@ -133,7 +133,7 @@ struct efivar_attribute {
34279 };
34280
34281 static struct efivars __efivars;
34282-static struct efivar_operations ops;
34283+static efivar_operations_no_const ops __read_only;
34284
34285 #define PSTORE_EFI_ATTRIBUTES \
34286 (EFI_VARIABLE_NON_VOLATILE | \
34287@@ -1734,7 +1734,7 @@ efivar_create_sysfs_entry(struct efivars *efivars,
34288 static int
34289 create_efivars_bin_attributes(struct efivars *efivars)
34290 {
34291- struct bin_attribute *attr;
34292+ bin_attribute_no_const *attr;
34293 int error;
34294
34295 /* new_var */
34296diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
34297index 2a90ba6..07f3733 100644
34298--- a/drivers/firmware/google/memconsole.c
34299+++ b/drivers/firmware/google/memconsole.c
34300@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
34301 if (!found_memconsole())
34302 return -ENODEV;
34303
34304- memconsole_bin_attr.size = memconsole_length;
34305+ pax_open_kernel();
34306+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
34307+ pax_close_kernel();
34308
34309 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
34310
34311diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
34312index 6f2306d..af9476a 100644
34313--- a/drivers/gpio/gpio-ich.c
34314+++ b/drivers/gpio/gpio-ich.c
34315@@ -69,7 +69,7 @@ struct ichx_desc {
34316 /* Some chipsets have quirks, let these use their own request/get */
34317 int (*request)(struct gpio_chip *chip, unsigned offset);
34318 int (*get)(struct gpio_chip *chip, unsigned offset);
34319-};
34320+} __do_const;
34321
34322 static struct {
34323 spinlock_t lock;
34324diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
34325index 9902732..64b62dd 100644
34326--- a/drivers/gpio/gpio-vr41xx.c
34327+++ b/drivers/gpio/gpio-vr41xx.c
34328@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
34329 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
34330 maskl, pendl, maskh, pendh);
34331
34332- atomic_inc(&irq_err_count);
34333+ atomic_inc_unchecked(&irq_err_count);
34334
34335 return -EINVAL;
34336 }
34337diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
34338index 7b2d378..cc947ea 100644
34339--- a/drivers/gpu/drm/drm_crtc_helper.c
34340+++ b/drivers/gpu/drm/drm_crtc_helper.c
34341@@ -319,7 +319,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
34342 struct drm_crtc *tmp;
34343 int crtc_mask = 1;
34344
34345- WARN(!crtc, "checking null crtc?\n");
34346+ BUG_ON(!crtc);
34347
34348 dev = crtc->dev;
34349
34350diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
34351index be174ca..7f38143 100644
34352--- a/drivers/gpu/drm/drm_drv.c
34353+++ b/drivers/gpu/drm/drm_drv.c
34354@@ -307,7 +307,7 @@ module_exit(drm_core_exit);
34355 /**
34356 * Copy and IOCTL return string to user space
34357 */
34358-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
34359+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
34360 {
34361 int len;
34362
34363@@ -377,7 +377,7 @@ long drm_ioctl(struct file *filp,
34364 struct drm_file *file_priv = filp->private_data;
34365 struct drm_device *dev;
34366 struct drm_ioctl_desc *ioctl;
34367- drm_ioctl_t *func;
34368+ drm_ioctl_no_const_t func;
34369 unsigned int nr = DRM_IOCTL_NR(cmd);
34370 int retcode = -EINVAL;
34371 char stack_kdata[128];
34372@@ -390,7 +390,7 @@ long drm_ioctl(struct file *filp,
34373 return -ENODEV;
34374
34375 atomic_inc(&dev->ioctl_count);
34376- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
34377+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
34378 ++file_priv->ioctl_count;
34379
34380 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
34381diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
34382index 133b413..fd68225 100644
34383--- a/drivers/gpu/drm/drm_fops.c
34384+++ b/drivers/gpu/drm/drm_fops.c
34385@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
34386 }
34387
34388 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
34389- atomic_set(&dev->counts[i], 0);
34390+ atomic_set_unchecked(&dev->counts[i], 0);
34391
34392 dev->sigdata.lock = NULL;
34393
34394@@ -134,7 +134,7 @@ int drm_open(struct inode *inode, struct file *filp)
34395 if (drm_device_is_unplugged(dev))
34396 return -ENODEV;
34397
34398- if (!dev->open_count++)
34399+ if (local_inc_return(&dev->open_count) == 1)
34400 need_setup = 1;
34401 mutex_lock(&dev->struct_mutex);
34402 old_mapping = dev->dev_mapping;
34403@@ -149,7 +149,7 @@ int drm_open(struct inode *inode, struct file *filp)
34404 retcode = drm_open_helper(inode, filp, dev);
34405 if (retcode)
34406 goto err_undo;
34407- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
34408+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
34409 if (need_setup) {
34410 retcode = drm_setup(dev);
34411 if (retcode)
34412@@ -164,7 +164,7 @@ err_undo:
34413 iput(container_of(dev->dev_mapping, struct inode, i_data));
34414 dev->dev_mapping = old_mapping;
34415 mutex_unlock(&dev->struct_mutex);
34416- dev->open_count--;
34417+ local_dec(&dev->open_count);
34418 return retcode;
34419 }
34420 EXPORT_SYMBOL(drm_open);
34421@@ -438,7 +438,7 @@ int drm_release(struct inode *inode, struct file *filp)
34422
34423 mutex_lock(&drm_global_mutex);
34424
34425- DRM_DEBUG("open_count = %d\n", dev->open_count);
34426+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
34427
34428 if (dev->driver->preclose)
34429 dev->driver->preclose(dev, file_priv);
34430@@ -447,10 +447,10 @@ int drm_release(struct inode *inode, struct file *filp)
34431 * Begin inline drm_release
34432 */
34433
34434- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
34435+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
34436 task_pid_nr(current),
34437 (long)old_encode_dev(file_priv->minor->device),
34438- dev->open_count);
34439+ local_read(&dev->open_count));
34440
34441 /* Release any auth tokens that might point to this file_priv,
34442 (do that under the drm_global_mutex) */
34443@@ -547,8 +547,8 @@ int drm_release(struct inode *inode, struct file *filp)
34444 * End inline drm_release
34445 */
34446
34447- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
34448- if (!--dev->open_count) {
34449+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
34450+ if (local_dec_and_test(&dev->open_count)) {
34451 if (atomic_read(&dev->ioctl_count)) {
34452 DRM_ERROR("Device busy: %d\n",
34453 atomic_read(&dev->ioctl_count));
34454diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
34455index f731116..629842c 100644
34456--- a/drivers/gpu/drm/drm_global.c
34457+++ b/drivers/gpu/drm/drm_global.c
34458@@ -36,7 +36,7 @@
34459 struct drm_global_item {
34460 struct mutex mutex;
34461 void *object;
34462- int refcount;
34463+ atomic_t refcount;
34464 };
34465
34466 static struct drm_global_item glob[DRM_GLOBAL_NUM];
34467@@ -49,7 +49,7 @@ void drm_global_init(void)
34468 struct drm_global_item *item = &glob[i];
34469 mutex_init(&item->mutex);
34470 item->object = NULL;
34471- item->refcount = 0;
34472+ atomic_set(&item->refcount, 0);
34473 }
34474 }
34475
34476@@ -59,7 +59,7 @@ void drm_global_release(void)
34477 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
34478 struct drm_global_item *item = &glob[i];
34479 BUG_ON(item->object != NULL);
34480- BUG_ON(item->refcount != 0);
34481+ BUG_ON(atomic_read(&item->refcount) != 0);
34482 }
34483 }
34484
34485@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
34486 void *object;
34487
34488 mutex_lock(&item->mutex);
34489- if (item->refcount == 0) {
34490+ if (atomic_read(&item->refcount) == 0) {
34491 item->object = kzalloc(ref->size, GFP_KERNEL);
34492 if (unlikely(item->object == NULL)) {
34493 ret = -ENOMEM;
34494@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
34495 goto out_err;
34496
34497 }
34498- ++item->refcount;
34499+ atomic_inc(&item->refcount);
34500 ref->object = item->object;
34501 object = item->object;
34502 mutex_unlock(&item->mutex);
34503@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
34504 struct drm_global_item *item = &glob[ref->global_type];
34505
34506 mutex_lock(&item->mutex);
34507- BUG_ON(item->refcount == 0);
34508+ BUG_ON(atomic_read(&item->refcount) == 0);
34509 BUG_ON(ref->object != item->object);
34510- if (--item->refcount == 0) {
34511+ if (atomic_dec_and_test(&item->refcount)) {
34512 ref->release(ref);
34513 item->object = NULL;
34514 }
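
Shape of the drm_global refcount conversion above: the plain int becomes an
atomic and release keys off atomic_dec_and_test(), where under PaX REFCOUNT
the checked atomic also traps on overflow. A C11 model with the same
get/put structure:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcount;

static void item_ref(void)
{
        if (atomic_load(&refcount) == 0)
                puts("first ref: allocate object");
        atomic_fetch_add(&refcount, 1);
}

static void item_unref(void)
{
        /* fetch_sub returns the old value; old == 1 means last ref dropped */
        if (atomic_fetch_sub(&refcount, 1) == 1)
                puts("last ref gone: release object");
}

int main(void)
{
        item_ref();
        item_ref();
        item_unref();
        item_unref();
        return 0;
}
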
34515diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
34516index d4b20ce..77a8d41 100644
34517--- a/drivers/gpu/drm/drm_info.c
34518+++ b/drivers/gpu/drm/drm_info.c
34519@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
34520 struct drm_local_map *map;
34521 struct drm_map_list *r_list;
34522
34523- /* Hardcoded from _DRM_FRAME_BUFFER,
34524- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
34525- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
34526- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
34527+ static const char * const types[] = {
34528+ [_DRM_FRAME_BUFFER] = "FB",
34529+ [_DRM_REGISTERS] = "REG",
34530+ [_DRM_SHM] = "SHM",
34531+ [_DRM_AGP] = "AGP",
34532+ [_DRM_SCATTER_GATHER] = "SG",
34533+ [_DRM_CONSISTENT] = "PCI",
34534+ [_DRM_GEM] = "GEM" };
34535 const char *type;
34536 int i;
34537
34538@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
34539 map = r_list->map;
34540 if (!map)
34541 continue;
34542- if (map->type < 0 || map->type > 5)
34543+ if (map->type >= ARRAY_SIZE(types))
34544 type = "??";
34545 else
34546 type = types[map->type];
34547@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
34548 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
34549 vma->vm_flags & VM_LOCKED ? 'l' : '-',
34550 vma->vm_flags & VM_IO ? 'i' : '-',
34551+#ifdef CONFIG_GRKERNSEC_HIDESYM
34552+ 0);
34553+#else
34554 vma->vm_pgoff);
34555+#endif
34556
34557 #if defined(__i386__)
34558 pgprot = pgprot_val(vma->vm_page_prot);
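
Model of the drm_vm_info() table rewrite above: designated initializers keep
each string glued to its enum value, and the range check derives from
ARRAY_SIZE rather than a hardcoded "> 5" that left _DRM_GEM entries printed
as "??" and relied on a comment to keep the ordering in sync.

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

enum map_type { FB, REG, SHM, AGP, SG, PCI, GEM };

static const char * const types[] = {
        [FB] = "FB", [REG] = "REG", [SHM] = "SHM",
        [AGP] = "AGP", [SG] = "SG", [PCI] = "PCI", [GEM] = "GEM",
};

static const char *type_name(unsigned int t)
{
        return t >= ARRAY_SIZE(types) || !types[t] ? "??" : types[t];
}

int main(void)
{
        printf("%s %s %s\n", type_name(GEM), type_name(PCI), type_name(42));
        return 0;
}
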
34559diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
34560index 2f4c434..dd12cd2 100644
34561--- a/drivers/gpu/drm/drm_ioc32.c
34562+++ b/drivers/gpu/drm/drm_ioc32.c
34563@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
34564 request = compat_alloc_user_space(nbytes);
34565 if (!access_ok(VERIFY_WRITE, request, nbytes))
34566 return -EFAULT;
34567- list = (struct drm_buf_desc *) (request + 1);
34568+ list = (struct drm_buf_desc __user *) (request + 1);
34569
34570 if (__put_user(count, &request->count)
34571 || __put_user(list, &request->list))
34572@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
34573 request = compat_alloc_user_space(nbytes);
34574 if (!access_ok(VERIFY_WRITE, request, nbytes))
34575 return -EFAULT;
34576- list = (struct drm_buf_pub *) (request + 1);
34577+ list = (struct drm_buf_pub __user *) (request + 1);
34578
34579 if (__put_user(count, &request->count)
34580 || __put_user(list, &request->list))
34581@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
34582 return 0;
34583 }
34584
34585-drm_ioctl_compat_t *drm_compat_ioctls[] = {
34586+drm_ioctl_compat_t drm_compat_ioctls[] = {
34587 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
34588 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
34589 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
34590@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
34591 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
34592 {
34593 unsigned int nr = DRM_IOCTL_NR(cmd);
34594- drm_ioctl_compat_t *fn;
34595 int ret;
34596
34597 /* Assume that ioctls without an explicit compat routine will just
34598@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
34599 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
34600 return drm_ioctl(filp, cmd, arg);
34601
34602- fn = drm_compat_ioctls[nr];
34603-
34604- if (fn != NULL)
34605- ret = (*fn) (filp, cmd, arg);
34606+ if (drm_compat_ioctls[nr] != NULL)
34607+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
34608 else
34609 ret = drm_ioctl(filp, cmd, arg);
34610
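
Sketch of the compat-ioctl table change above, on the assumption (not
visible in this hunk) that the patch retypes drm_ioctl_compat_t elsewhere so
the typedef itself is a function-pointer type: the table then becomes an
array of that type, can be placed in read-only data, and is called through
directly with no writable local copy of the pointer.

#include <stdio.h>

typedef long (*ioctl_compat_t)(unsigned int cmd, unsigned long arg);

static long compat_version(unsigned int cmd, unsigned long arg)
{
        return (long)(cmd + arg);
}

static ioctl_compat_t const compat_ioctls[] = { /* pointers; table is const */
        [0] = compat_version,
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
        unsigned int nr = 0;

        if (nr < ARRAY_SIZE(compat_ioctls) && compat_ioctls[nr] != NULL)
                printf("ret = %ld\n", compat_ioctls[nr](nr, 41));
        return 0;
}

The i915, mga and nouveau ioc32 hunks below apply the same transformation to
their compat tables.
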
34611diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
34612index e77bd8b..1571b85 100644
34613--- a/drivers/gpu/drm/drm_ioctl.c
34614+++ b/drivers/gpu/drm/drm_ioctl.c
34615@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
34616 stats->data[i].value =
34617 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
34618 else
34619- stats->data[i].value = atomic_read(&dev->counts[i]);
34620+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
34621 stats->data[i].type = dev->types[i];
34622 }
34623
34624diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
34625index d752c96..fe08455 100644
34626--- a/drivers/gpu/drm/drm_lock.c
34627+++ b/drivers/gpu/drm/drm_lock.c
34628@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
34629 if (drm_lock_take(&master->lock, lock->context)) {
34630 master->lock.file_priv = file_priv;
34631 master->lock.lock_time = jiffies;
34632- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
34633+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
34634 break; /* Got lock */
34635 }
34636
34637@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
34638 return -EINVAL;
34639 }
34640
34641- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
34642+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
34643
34644 if (drm_lock_free(&master->lock, lock->context)) {
34645 /* FIXME: Should really bail out here. */
34646diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
34647index 200e104..59facda 100644
34648--- a/drivers/gpu/drm/drm_stub.c
34649+++ b/drivers/gpu/drm/drm_stub.c
34650@@ -516,7 +516,7 @@ void drm_unplug_dev(struct drm_device *dev)
34651
34652 drm_device_set_unplugged(dev);
34653
34654- if (dev->open_count == 0) {
34655+ if (local_read(&dev->open_count) == 0) {
34656 drm_put_dev(dev);
34657 }
34658 mutex_unlock(&drm_global_mutex);
34659diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
34660index 004ecdf..db1f6e0 100644
34661--- a/drivers/gpu/drm/i810/i810_dma.c
34662+++ b/drivers/gpu/drm/i810/i810_dma.c
34663@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
34664 dma->buflist[vertex->idx],
34665 vertex->discard, vertex->used);
34666
34667- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
34668- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
34669+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
34670+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
34671 sarea_priv->last_enqueue = dev_priv->counter - 1;
34672 sarea_priv->last_dispatch = (int)hw_status[5];
34673
34674@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
34675 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
34676 mc->last_render);
34677
34678- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
34679- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
34680+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
34681+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
34682 sarea_priv->last_enqueue = dev_priv->counter - 1;
34683 sarea_priv->last_dispatch = (int)hw_status[5];
34684
34685diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
34686index 6e0acad..93c8289 100644
34687--- a/drivers/gpu/drm/i810/i810_drv.h
34688+++ b/drivers/gpu/drm/i810/i810_drv.h
34689@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
34690 int page_flipping;
34691
34692 wait_queue_head_t irq_queue;
34693- atomic_t irq_received;
34694- atomic_t irq_emitted;
34695+ atomic_unchecked_t irq_received;
34696+ atomic_unchecked_t irq_emitted;
34697
34698 int front_offset;
34699 } drm_i810_private_t;
34700diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
34701index 8a7c48b..72effc2 100644
34702--- a/drivers/gpu/drm/i915/i915_debugfs.c
34703+++ b/drivers/gpu/drm/i915/i915_debugfs.c
34704@@ -496,7 +496,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
34705 I915_READ(GTIMR));
34706 }
34707 seq_printf(m, "Interrupts received: %d\n",
34708- atomic_read(&dev_priv->irq_received));
34709+ atomic_read_unchecked(&dev_priv->irq_received));
34710 for_each_ring(ring, dev_priv, i) {
34711 if (IS_GEN6(dev) || IS_GEN7(dev)) {
34712 seq_printf(m,
34713diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
34714index 99daa89..84ebd44 100644
34715--- a/drivers/gpu/drm/i915/i915_dma.c
34716+++ b/drivers/gpu/drm/i915/i915_dma.c
34717@@ -1253,7 +1253,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
34718 bool can_switch;
34719
34720 spin_lock(&dev->count_lock);
34721- can_switch = (dev->open_count == 0);
34722+ can_switch = (local_read(&dev->open_count) == 0);
34723 spin_unlock(&dev->count_lock);
34724 return can_switch;
34725 }
34726diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
34727index 7339a4b..445aaba 100644
34728--- a/drivers/gpu/drm/i915/i915_drv.h
34729+++ b/drivers/gpu/drm/i915/i915_drv.h
34730@@ -656,7 +656,7 @@ typedef struct drm_i915_private {
34731 drm_dma_handle_t *status_page_dmah;
34732 struct resource mch_res;
34733
34734- atomic_t irq_received;
34735+ atomic_unchecked_t irq_received;
34736
34737 /* protects the irq masks */
34738 spinlock_t irq_lock;
34739@@ -1102,7 +1102,7 @@ struct drm_i915_gem_object {
34740 * will be page flipped away on the next vblank. When it
34741 * reaches 0, dev_priv->pending_flip_queue will be woken up.
34742 */
34743- atomic_t pending_flip;
34744+ atomic_unchecked_t pending_flip;
34745 };
34746 #define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base)
34747
34748@@ -1633,7 +1633,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
34749 struct drm_i915_private *dev_priv, unsigned port);
34750 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
34751 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
34752-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
34753+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
34754 {
34755 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
34756 }
34757diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
34758index 26d08bb..fccb984 100644
34759--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
34760+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
34761@@ -672,7 +672,7 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
34762 i915_gem_clflush_object(obj);
34763
34764 if (obj->base.pending_write_domain)
34765- flips |= atomic_read(&obj->pending_flip);
34766+ flips |= atomic_read_unchecked(&obj->pending_flip);
34767
34768 flush_domains |= obj->base.write_domain;
34769 }
34770@@ -703,9 +703,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
34771
34772 static int
34773 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
34774- int count)
34775+ unsigned int count)
34776 {
34777- int i;
34778+ unsigned int i;
34779
34780 for (i = 0; i < count; i++) {
34781 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
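
The validate_exec_list() signedness change above closes a classic pitfall:
with a signed count, a negative user-influenced value makes the validation
loop run zero times, while later size computations may reinterpret the same
value as a huge unsigned number. A minimal demonstration:

#include <stdio.h>

static unsigned int validated_signed(int count)
{
        unsigned int n = 0;

        for (int i = 0; i < count; i++) /* never entered for count = -1 */
                n++;
        return n;
}

int main(void)
{
        int count = -1;

        printf("entries validated: %u\n", validated_signed(count));
        printf("bytes later computed: %zu\n",
               (size_t)count * sizeof(void *)); /* -1 becomes huge */
        return 0;
}
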
34782diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
34783index 3c59584..500f2e9 100644
34784--- a/drivers/gpu/drm/i915/i915_ioc32.c
34785+++ b/drivers/gpu/drm/i915/i915_ioc32.c
34786@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
34787 (unsigned long)request);
34788 }
34789
34790-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
34791+static drm_ioctl_compat_t i915_compat_ioctls[] = {
34792 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
34793 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
34794 [DRM_I915_GETPARAM] = compat_i915_getparam,
34795@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
34796 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
34797 {
34798 unsigned int nr = DRM_IOCTL_NR(cmd);
34799- drm_ioctl_compat_t *fn = NULL;
34800 int ret;
34801
34802 if (nr < DRM_COMMAND_BASE)
34803 return drm_compat_ioctl(filp, cmd, arg);
34804
34805- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
34806- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
34807-
34808- if (fn != NULL)
34809+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
34810+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
34811 ret = (*fn) (filp, cmd, arg);
34812- else
34813+ } else
34814 ret = drm_ioctl(filp, cmd, arg);
34815
34816 return ret;
34817diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
34818index fe84338..a863190 100644
34819--- a/drivers/gpu/drm/i915/i915_irq.c
34820+++ b/drivers/gpu/drm/i915/i915_irq.c
34821@@ -535,7 +535,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
34822 u32 pipe_stats[I915_MAX_PIPES];
34823 bool blc_event;
34824
34825- atomic_inc(&dev_priv->irq_received);
34826+ atomic_inc_unchecked(&dev_priv->irq_received);
34827
34828 while (true) {
34829 iir = I915_READ(VLV_IIR);
34830@@ -688,7 +688,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
34831 irqreturn_t ret = IRQ_NONE;
34832 int i;
34833
34834- atomic_inc(&dev_priv->irq_received);
34835+ atomic_inc_unchecked(&dev_priv->irq_received);
34836
34837 /* disable master interrupt before clearing iir */
34838 de_ier = I915_READ(DEIER);
34839@@ -760,7 +760,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
34840 int ret = IRQ_NONE;
34841 u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
34842
34843- atomic_inc(&dev_priv->irq_received);
34844+ atomic_inc_unchecked(&dev_priv->irq_received);
34845
34846 /* disable master interrupt before clearing iir */
34847 de_ier = I915_READ(DEIER);
34848@@ -1787,7 +1787,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
34849 {
34850 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
34851
34852- atomic_set(&dev_priv->irq_received, 0);
34853+ atomic_set_unchecked(&dev_priv->irq_received, 0);
34854
34855 I915_WRITE(HWSTAM, 0xeffe);
34856
34857@@ -1813,7 +1813,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
34858 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
34859 int pipe;
34860
34861- atomic_set(&dev_priv->irq_received, 0);
34862+ atomic_set_unchecked(&dev_priv->irq_received, 0);
34863
34864 /* VLV magic */
34865 I915_WRITE(VLV_IMR, 0);
34866@@ -2108,7 +2108,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
34867 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
34868 int pipe;
34869
34870- atomic_set(&dev_priv->irq_received, 0);
34871+ atomic_set_unchecked(&dev_priv->irq_received, 0);
34872
34873 for_each_pipe(pipe)
34874 I915_WRITE(PIPESTAT(pipe), 0);
34875@@ -2159,7 +2159,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
34876 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
34877 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
34878
34879- atomic_inc(&dev_priv->irq_received);
34880+ atomic_inc_unchecked(&dev_priv->irq_received);
34881
34882 iir = I915_READ16(IIR);
34883 if (iir == 0)
34884@@ -2244,7 +2244,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
34885 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
34886 int pipe;
34887
34888- atomic_set(&dev_priv->irq_received, 0);
34889+ atomic_set_unchecked(&dev_priv->irq_received, 0);
34890
34891 if (I915_HAS_HOTPLUG(dev)) {
34892 I915_WRITE(PORT_HOTPLUG_EN, 0);
34893@@ -2339,7 +2339,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
34894 };
34895 int pipe, ret = IRQ_NONE;
34896
34897- atomic_inc(&dev_priv->irq_received);
34898+ atomic_inc_unchecked(&dev_priv->irq_received);
34899
34900 iir = I915_READ(IIR);
34901 do {
34902@@ -2465,7 +2465,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
34903 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
34904 int pipe;
34905
34906- atomic_set(&dev_priv->irq_received, 0);
34907+ atomic_set_unchecked(&dev_priv->irq_received, 0);
34908
34909 I915_WRITE(PORT_HOTPLUG_EN, 0);
34910 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
34911@@ -2572,7 +2572,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
34912 int irq_received;
34913 int ret = IRQ_NONE, pipe;
34914
34915- atomic_inc(&dev_priv->irq_received);
34916+ atomic_inc_unchecked(&dev_priv->irq_received);
34917
34918 iir = I915_READ(IIR);
34919
34920diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
34921index 80aa1fc..85cfce3 100644
34922--- a/drivers/gpu/drm/i915/intel_display.c
34923+++ b/drivers/gpu/drm/i915/intel_display.c
34924@@ -2255,7 +2255,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
34925
34926 wait_event(dev_priv->pending_flip_queue,
34927 atomic_read(&dev_priv->mm.wedged) ||
34928- atomic_read(&obj->pending_flip) == 0);
34929+ atomic_read_unchecked(&obj->pending_flip) == 0);
34930
34931 /* Big Hammer, we also need to ensure that any pending
34932 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
34933@@ -7122,8 +7122,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
34934
34935 obj = work->old_fb_obj;
34936
34937- atomic_clear_mask(1 << intel_crtc->plane,
34938- &obj->pending_flip.counter);
34939+ atomic_clear_mask_unchecked(1 << intel_crtc->plane, &obj->pending_flip);
34940 wake_up(&dev_priv->pending_flip_queue);
34941
34942 queue_work(dev_priv->wq, &work->work);
34943@@ -7490,7 +7489,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
34944 /* Block clients from rendering to the new back buffer until
34945 * the flip occurs and the object is no longer visible.
34946 */
34947- atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
34948+ atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
34949 atomic_inc(&intel_crtc->unpin_work_count);
34950
34951 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
34952@@ -7507,7 +7506,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
34953
34954 cleanup_pending:
34955 atomic_dec(&intel_crtc->unpin_work_count);
34956- atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
34957+ atomic_sub_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
34958 drm_gem_object_unreference(&work->old_fb_obj->base);
34959 drm_gem_object_unreference(&obj->base);
34960 mutex_unlock(&dev->struct_mutex);
34961@@ -8849,13 +8848,13 @@ struct intel_quirk {
34962 int subsystem_vendor;
34963 int subsystem_device;
34964 void (*hook)(struct drm_device *dev);
34965-};
34966+} __do_const;
34967
34968 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
34969 struct intel_dmi_quirk {
34970 void (*hook)(struct drm_device *dev);
34971 const struct dmi_system_id (*dmi_id_list)[];
34972-};
34973+} __do_const;
34974
34975 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
34976 {
34977@@ -8863,18 +8862,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
34978 return 1;
34979 }
34980
34981+static const struct dmi_system_id intel_dmi_quirks_table[] = {
34982+ {
34983+ .callback = intel_dmi_reverse_brightness,
34984+ .ident = "NCR Corporation",
34985+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
34986+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
34987+ },
34988+ },
34989+ { } /* terminating entry */
34990+};
34991+
34992 static const struct intel_dmi_quirk intel_dmi_quirks[] = {
34993 {
34994- .dmi_id_list = &(const struct dmi_system_id[]) {
34995- {
34996- .callback = intel_dmi_reverse_brightness,
34997- .ident = "NCR Corporation",
34998- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
34999- DMI_MATCH(DMI_PRODUCT_NAME, ""),
35000- },
35001- },
35002- { } /* terminating entry */
35003- },
35004+ .dmi_id_list = &intel_dmi_quirks_table,
35005 .hook = quirk_invert_brightness,
35006 },
35007 };
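
Same shape as the intel_dmi_quirks rewrite above: the anonymous compound
literal becomes a named static const table, which is easier to force into
read-only data (and to reference from more than one place). A sketch with a
simplified dmi_system_id stand-in:

#include <stdio.h>

struct dmi_id {
        int (*callback)(const struct dmi_id *);
        const char *ident;
};

static int reverse_brightness(const struct dmi_id *id)
{
        printf("applying quirk for %s\n", id->ident);
        return 1;
}

static const struct dmi_id quirks_table[] = {   /* was an anonymous literal */
        { .callback = reverse_brightness, .ident = "NCR Corporation" },
        { }                                     /* terminating entry */
};

int main(void)
{
        for (const struct dmi_id *q = quirks_table; q->callback; q++)
                q->callback(q);
        return 0;
}
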
35008diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
35009index 54558a0..2d97005 100644
35010--- a/drivers/gpu/drm/mga/mga_drv.h
35011+++ b/drivers/gpu/drm/mga/mga_drv.h
35012@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
35013 u32 clear_cmd;
35014 u32 maccess;
35015
35016- atomic_t vbl_received; /**< Number of vblanks received. */
35017+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
35018 wait_queue_head_t fence_queue;
35019- atomic_t last_fence_retired;
35020+ atomic_unchecked_t last_fence_retired;
35021 u32 next_fence_to_post;
35022
35023 unsigned int fb_cpp;
35024diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
35025index 709e90d..89a1c0d 100644
35026--- a/drivers/gpu/drm/mga/mga_ioc32.c
35027+++ b/drivers/gpu/drm/mga/mga_ioc32.c
35028@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
35029 return 0;
35030 }
35031
35032-drm_ioctl_compat_t *mga_compat_ioctls[] = {
35033+drm_ioctl_compat_t mga_compat_ioctls[] = {
35034 [DRM_MGA_INIT] = compat_mga_init,
35035 [DRM_MGA_GETPARAM] = compat_mga_getparam,
35036 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
35037@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
35038 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35039 {
35040 unsigned int nr = DRM_IOCTL_NR(cmd);
35041- drm_ioctl_compat_t *fn = NULL;
35042 int ret;
35043
35044 if (nr < DRM_COMMAND_BASE)
35045 return drm_compat_ioctl(filp, cmd, arg);
35046
35047- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
35048- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35049-
35050- if (fn != NULL)
35051+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
35052+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35053 ret = (*fn) (filp, cmd, arg);
35054- else
35055+ } else
35056 ret = drm_ioctl(filp, cmd, arg);
35057
35058 return ret;
35059diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
35060index 598c281..60d590e 100644
35061--- a/drivers/gpu/drm/mga/mga_irq.c
35062+++ b/drivers/gpu/drm/mga/mga_irq.c
35063@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
35064 if (crtc != 0)
35065 return 0;
35066
35067- return atomic_read(&dev_priv->vbl_received);
35068+ return atomic_read_unchecked(&dev_priv->vbl_received);
35069 }
35070
35071
35072@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35073 /* VBLANK interrupt */
35074 if (status & MGA_VLINEPEN) {
35075 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
35076- atomic_inc(&dev_priv->vbl_received);
35077+ atomic_inc_unchecked(&dev_priv->vbl_received);
35078 drm_handle_vblank(dev, 0);
35079 handled = 1;
35080 }
35081@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35082 if ((prim_start & ~0x03) != (prim_end & ~0x03))
35083 MGA_WRITE(MGA_PRIMEND, prim_end);
35084
35085- atomic_inc(&dev_priv->last_fence_retired);
35086+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
35087 DRM_WAKEUP(&dev_priv->fence_queue);
35088 handled = 1;
35089 }
35090@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
35091 * using fences.
35092 */
35093 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
35094- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
35095+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
35096 - *sequence) <= (1 << 23)));
35097
35098 *sequence = cur_fence;
35099diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
35100index 865eddf..62c4cc3 100644
35101--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
35102+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
35103@@ -1015,7 +1015,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
35104 struct bit_table {
35105 const char id;
35106 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
35107-};
35108+} __no_const;
35109
35110 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
35111
35112diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
35113index aa89eb9..d45d38b 100644
35114--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
35115+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
35116@@ -80,7 +80,7 @@ struct nouveau_drm {
35117 struct drm_global_reference mem_global_ref;
35118 struct ttm_bo_global_ref bo_global_ref;
35119 struct ttm_bo_device bdev;
35120- atomic_t validate_sequence;
35121+ atomic_unchecked_t validate_sequence;
35122 int (*move)(struct nouveau_channel *,
35123 struct ttm_buffer_object *,
35124 struct ttm_mem_reg *, struct ttm_mem_reg *);
35125diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
35126index cdb83ac..27f0a16 100644
35127--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
35128+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
35129@@ -43,7 +43,7 @@ struct nouveau_fence_priv {
35130 int (*sync)(struct nouveau_fence *, struct nouveau_channel *,
35131 struct nouveau_channel *);
35132 u32 (*read)(struct nouveau_channel *);
35133-};
35134+} __no_const;
35135
35136 #define nouveau_fence(drm) ((struct nouveau_fence_priv *)(drm)->fence)
35137
35138diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
35139index 8bf695c..9fbc90a 100644
35140--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
35141+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
35142@@ -321,7 +321,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
35143 int trycnt = 0;
35144 int ret, i;
35145
35146- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
35147+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
35148 retry:
35149 if (++trycnt > 100000) {
35150 NV_ERROR(drm, "%s failed and gave up.\n", __func__);
35151diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35152index 08214bc..9208577 100644
35153--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35154+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35155@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
35156 unsigned long arg)
35157 {
35158 unsigned int nr = DRM_IOCTL_NR(cmd);
35159- drm_ioctl_compat_t *fn = NULL;
35160+ drm_ioctl_compat_t fn = NULL;
35161 int ret;
35162
35163 if (nr < DRM_COMMAND_BASE)
35164diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
35165index 25d3495..d81aaf6 100644
35166--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
35167+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
35168@@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
35169 bool can_switch;
35170
35171 spin_lock(&dev->count_lock);
35172- can_switch = (dev->open_count == 0);
35173+ can_switch = (local_read(&dev->open_count) == 0);
35174 spin_unlock(&dev->count_lock);
35175 return can_switch;
35176 }
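
[Both switcheroo can-switch hunks (here and in radeon_device.c below) read dev->open_count through local_read(), which implies the patch converts the DRM core's open_count from a plain int to a local_t elsewhere. local_t is the kernel's cheap CPU-local counter from asm/local.h; a usage sketch:

	#include <asm/local.h>		/* local_t, LOCAL_INIT, local_read */

	static local_t open_count = LOCAL_INIT(0);

	static void demo_open(void)
	{
		local_inc(&open_count);
	}

	static int demo_can_switch(void)
	{
		return local_read(&open_count) == 0;	/* idle when no opens */
	}
]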
35177diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
35178index d4660cf..70dbe65 100644
35179--- a/drivers/gpu/drm/r128/r128_cce.c
35180+++ b/drivers/gpu/drm/r128/r128_cce.c
35181@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
35182
35183 /* GH: Simple idle check.
35184 */
35185- atomic_set(&dev_priv->idle_count, 0);
35186+ atomic_set_unchecked(&dev_priv->idle_count, 0);
35187
35188 /* We don't support anything other than bus-mastering ring mode,
35189 * but the ring can be in either AGP or PCI space for the ring
35190diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
35191index 930c71b..499aded 100644
35192--- a/drivers/gpu/drm/r128/r128_drv.h
35193+++ b/drivers/gpu/drm/r128/r128_drv.h
35194@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
35195 int is_pci;
35196 unsigned long cce_buffers_offset;
35197
35198- atomic_t idle_count;
35199+ atomic_unchecked_t idle_count;
35200
35201 int page_flipping;
35202 int current_page;
35203 u32 crtc_offset;
35204 u32 crtc_offset_cntl;
35205
35206- atomic_t vbl_received;
35207+ atomic_unchecked_t vbl_received;
35208
35209 u32 color_fmt;
35210 unsigned int front_offset;
35211diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
35212index a954c54..9cc595c 100644
35213--- a/drivers/gpu/drm/r128/r128_ioc32.c
35214+++ b/drivers/gpu/drm/r128/r128_ioc32.c
35215@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
35216 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
35217 }
35218
35219-drm_ioctl_compat_t *r128_compat_ioctls[] = {
35220+drm_ioctl_compat_t r128_compat_ioctls[] = {
35221 [DRM_R128_INIT] = compat_r128_init,
35222 [DRM_R128_DEPTH] = compat_r128_depth,
35223 [DRM_R128_STIPPLE] = compat_r128_stipple,
35224@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
35225 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35226 {
35227 unsigned int nr = DRM_IOCTL_NR(cmd);
35228- drm_ioctl_compat_t *fn = NULL;
35229 int ret;
35230
35231 if (nr < DRM_COMMAND_BASE)
35232 return drm_compat_ioctl(filp, cmd, arg);
35233
35234- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
35235- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
35236-
35237- if (fn != NULL)
35238+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
35239+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
35240 ret = (*fn) (filp, cmd, arg);
35241- else
35242+ } else
35243 ret = drm_ioctl(filp, cmd, arg);
35244
35245 return ret;
35246diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
35247index 2ea4f09..d391371 100644
35248--- a/drivers/gpu/drm/r128/r128_irq.c
35249+++ b/drivers/gpu/drm/r128/r128_irq.c
35250@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
35251 if (crtc != 0)
35252 return 0;
35253
35254- return atomic_read(&dev_priv->vbl_received);
35255+ return atomic_read_unchecked(&dev_priv->vbl_received);
35256 }
35257
35258 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35259@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
35260 /* VBLANK interrupt */
35261 if (status & R128_CRTC_VBLANK_INT) {
35262 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
35263- atomic_inc(&dev_priv->vbl_received);
35264+ atomic_inc_unchecked(&dev_priv->vbl_received);
35265 drm_handle_vblank(dev, 0);
35266 return IRQ_HANDLED;
35267 }
35268diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
35269index 19bb7e6..de7e2a2 100644
35270--- a/drivers/gpu/drm/r128/r128_state.c
35271+++ b/drivers/gpu/drm/r128/r128_state.c
35272@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
35273
35274 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
35275 {
35276- if (atomic_read(&dev_priv->idle_count) == 0)
35277+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
35278 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
35279 else
35280- atomic_set(&dev_priv->idle_count, 0);
35281+ atomic_set_unchecked(&dev_priv->idle_count, 0);
35282 }
35283
35284 #endif
35285diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
35286index 5a82b6b..9e69c73 100644
35287--- a/drivers/gpu/drm/radeon/mkregtable.c
35288+++ b/drivers/gpu/drm/radeon/mkregtable.c
35289@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
35290 regex_t mask_rex;
35291 regmatch_t match[4];
35292 char buf[1024];
35293- size_t end;
35294+ long end;
35295 int len;
35296 int done = 0;
35297 int r;
35298 unsigned o;
35299 struct offset *offset;
35300 char last_reg_s[10];
35301- int last_reg;
35302+ unsigned long last_reg;
35303
35304 if (regcomp
35305 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
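
[mkregtable is a host-side build tool, so this is a plain C robustness fix: end holds results of arithmetic that can go negative, and with an unsigned size_t a negative intermediate silently wraps, making any "< 0" test dead code; last_reg is widened, presumably to match the width it is parsed and compared at. The pitfall being avoided, in miniature:

	#include <stdio.h>
	#include <stddef.h>

	int main(void)
	{
		size_t end_u = 0;
		long   end_s = 0;

		end_u -= 1;	/* wraps to SIZE_MAX */
		end_s -= 1;	/* honestly -1 */

		/* the first test can never be true for an unsigned type */
		printf("%d %d\n", end_u < 0, end_s < 0);	/* prints: 0 1 */
		return 0;
	}
]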
35306diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
35307index 0d6562b..a154330 100644
35308--- a/drivers/gpu/drm/radeon/radeon_device.c
35309+++ b/drivers/gpu/drm/radeon/radeon_device.c
35310@@ -969,7 +969,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
35311 bool can_switch;
35312
35313 spin_lock(&dev->count_lock);
35314- can_switch = (dev->open_count == 0);
35315+ can_switch = (local_read(&dev->open_count) == 0);
35316 spin_unlock(&dev->count_lock);
35317 return can_switch;
35318 }
35319diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
35320index e7fdf16..f4f6490 100644
35321--- a/drivers/gpu/drm/radeon/radeon_drv.h
35322+++ b/drivers/gpu/drm/radeon/radeon_drv.h
35323@@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
35324
35325 /* SW interrupt */
35326 wait_queue_head_t swi_queue;
35327- atomic_t swi_emitted;
35328+ atomic_unchecked_t swi_emitted;
35329 int vblank_crtc;
35330 uint32_t irq_enable_reg;
35331 uint32_t r500_disp_irq_reg;
35332diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
35333index c180df8..5fd8186 100644
35334--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
35335+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
35336@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
35337 request = compat_alloc_user_space(sizeof(*request));
35338 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
35339 || __put_user(req32.param, &request->param)
35340- || __put_user((void __user *)(unsigned long)req32.value,
35341+ || __put_user((unsigned long)req32.value,
35342 &request->value))
35343 return -EFAULT;
35344
35345@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
35346 #define compat_radeon_cp_setparam NULL
35347 #endif /* X86_64 || IA64 */
35348
35349-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
35350+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
35351 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
35352 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
35353 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
35354@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
35355 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35356 {
35357 unsigned int nr = DRM_IOCTL_NR(cmd);
35358- drm_ioctl_compat_t *fn = NULL;
35359 int ret;
35360
35361 if (nr < DRM_COMMAND_BASE)
35362 return drm_compat_ioctl(filp, cmd, arg);
35363
35364- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
35365- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
35366-
35367- if (fn != NULL)
35368+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
35369+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
35370 ret = (*fn) (filp, cmd, arg);
35371- else
35372+ } else
35373 ret = drm_ioctl(filp, cmd, arg);
35374
35375 return ret;
35376diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
35377index e771033..a0bc6b3 100644
35378--- a/drivers/gpu/drm/radeon/radeon_irq.c
35379+++ b/drivers/gpu/drm/radeon/radeon_irq.c
35380@@ -224,8 +224,8 @@ static int radeon_emit_irq(struct drm_device * dev)
35381 unsigned int ret;
35382 RING_LOCALS;
35383
35384- atomic_inc(&dev_priv->swi_emitted);
35385- ret = atomic_read(&dev_priv->swi_emitted);
35386+ atomic_inc_unchecked(&dev_priv->swi_emitted);
35387+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
35388
35389 BEGIN_RING(4);
35390 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
35391@@ -351,7 +351,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
35392 drm_radeon_private_t *dev_priv =
35393 (drm_radeon_private_t *) dev->dev_private;
35394
35395- atomic_set(&dev_priv->swi_emitted, 0);
35396+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
35397 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
35398
35399 dev->max_vblank_count = 0x001fffff;
35400diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
35401index 8e9057b..af6dacb 100644
35402--- a/drivers/gpu/drm/radeon/radeon_state.c
35403+++ b/drivers/gpu/drm/radeon/radeon_state.c
35404@@ -2166,7 +2166,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
35405 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
35406 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
35407
35408- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
35409+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
35410 sarea_priv->nbox * sizeof(depth_boxes[0])))
35411 return -EFAULT;
35412
35413@@ -3029,7 +3029,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
35414 {
35415 drm_radeon_private_t *dev_priv = dev->dev_private;
35416 drm_radeon_getparam_t *param = data;
35417- int value;
35418+ int value = 0;
35419
35420 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
35421
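
[Two hardening tweaks in radeon_state.c. The clear ioctl folds the sarea_priv->nbox bound into the same expression that sizes the user copy: nbox lives in the shared SAREA, so a value clamped a few lines earlier can be re-grown by userspace before DRM_COPY_FROM_USER runs (a time-of-check/time-of-use race). And radeon_cp_getparam zero-initializes value, so a parameter that falls through without writing it can no longer copy stale kernel stack out to userspace. The infoleak pattern being closed, sketched with hypothetical demo_* names (copy_to_user() as in <linux/uaccess.h>):

	struct demo_getparam {
		unsigned int param;
		int __user *value_ptr;
	};

	static int demo_getparam_handler(struct demo_getparam *req)
	{
		int value;		/* uninitialized stack slot */

		switch (req->param) {
		case 1:
			value = 42;
			break;
		default:
			break;		/* 'value' is never written here... */
		}

		/* ...so this would disclose whatever was on the stack;
		 * starting from 'int value = 0;' makes the miss harmless */
		if (copy_to_user(req->value_ptr, &value, sizeof(value)))
			return -1;	/* -EFAULT in the real code */
		return 0;
	}
]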
35422diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
35423index 93f760e..8088227 100644
35424--- a/drivers/gpu/drm/radeon/radeon_ttm.c
35425+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
35426@@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
35427 man->size = size >> PAGE_SHIFT;
35428 }
35429
35430-static struct vm_operations_struct radeon_ttm_vm_ops;
35431+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
35432 static const struct vm_operations_struct *ttm_vm_ops = NULL;
35433
35434 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35435@@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
35436 }
35437 if (unlikely(ttm_vm_ops == NULL)) {
35438 ttm_vm_ops = vma->vm_ops;
35439+ pax_open_kernel();
35440 radeon_ttm_vm_ops = *ttm_vm_ops;
35441 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
35442+ pax_close_kernel();
35443 }
35444 vma->vm_ops = &radeon_ttm_vm_ops;
35445 return 0;
35446@@ -862,28 +864,33 @@ static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
35447 sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
35448 else
35449 sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
35450- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35451- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
35452- radeon_mem_types_list[i].driver_features = 0;
35453+ pax_open_kernel();
35454+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35455+ *(void **)&radeon_mem_types_list[i].show = &radeon_mm_dump_table;
35456+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
35457 if (i == 0)
35458- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
35459+ *(void **)&radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
35460 else
35461- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
35462-
35463+ *(void **)&radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
35464+ pax_close_kernel();
35465 }
35466 /* Add ttm page pool to debugfs */
35467 sprintf(radeon_mem_types_names[i], "ttm_page_pool");
35468- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35469- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
35470- radeon_mem_types_list[i].driver_features = 0;
35471- radeon_mem_types_list[i++].data = NULL;
35472+ pax_open_kernel();
35473+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35474+ *(void **)&radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
35475+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
35476+ *(void **)&radeon_mem_types_list[i++].data = NULL;
35477+ pax_close_kernel();
35478 #ifdef CONFIG_SWIOTLB
35479 if (swiotlb_nr_tbl()) {
35480 sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
35481- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35482- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
35483- radeon_mem_types_list[i].driver_features = 0;
35484- radeon_mem_types_list[i++].data = NULL;
35485+ pax_open_kernel();
35486+ *(const char **)&radeon_mem_types_list[i].name = radeon_mem_types_names[i];
35487+ *(void **)&radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
35488+ *(u32 *)&radeon_mem_types_list[i].driver_features = 0;
35489+ *(void **)&radeon_mem_types_list[i++].data = NULL;
35490+ pax_close_kernel();
35491 }
35492 #endif
35493 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
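
[The radeon_ttm.c hunks write through explicit casts into objects the patch has made read-only (the cloned vm_operations_struct and the constified debugfs list entries) and bracket every such write with pax_open_kernel()/pax_close_kernel(). On x86 those helpers amount to briefly clearing CR0.WP so supervisor-mode stores ignore page-level write protection; a simplified sketch, not the patch's exact definitions:

	static inline void demo_open_kernel(void)
	{
		preempt_disable();
		write_cr0(read_cr0() & ~X86_CR0_WP);	/* writes ignore RO bits */
	}

	static inline void demo_close_kernel(void)
	{
		write_cr0(read_cr0() | X86_CR0_WP);	/* protection back on */
		preempt_enable();
	}
]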
35494diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
35495index 5706d2a..17aedaa 100644
35496--- a/drivers/gpu/drm/radeon/rs690.c
35497+++ b/drivers/gpu/drm/radeon/rs690.c
35498@@ -304,9 +304,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
35499 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
35500 rdev->pm.sideport_bandwidth.full)
35501 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
35502- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
35503+ read_delay_latency.full = dfixed_const(800 * 1000);
35504 read_delay_latency.full = dfixed_div(read_delay_latency,
35505 rdev->pm.igp_sideport_mclk);
35506+ a.full = dfixed_const(370);
35507+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
35508 } else {
35509 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
35510 rdev->pm.k8_bandwidth.full)
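
[The rs690.c change is a fixed-point overflow fix. With dfixed_const(x) expanding to roughly ((u32)(x) << 12) in 20.12 fixed point, the original constant no longer fits once shifted:

	370 * 800 * 1000    = 296,000,000
	296,000,000 << 12   ~ 1.21e12         -> overflows a 32-bit word
	800 * 1000          = 800,000
	800,000 << 12       = 3,276,800,000   -> still fits in a u32

so the patch shifts only 800 * 1000, divides by the sideport memory clock first, and multiplies the now-small quotient by 370 with dfixed_mul().]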
35511diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
35512index bd2a3b4..122d9ad 100644
35513--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
35514+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
35515@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
35516 static int ttm_pool_mm_shrink(struct shrinker *shrink,
35517 struct shrink_control *sc)
35518 {
35519- static atomic_t start_pool = ATOMIC_INIT(0);
35520+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
35521 unsigned i;
35522- unsigned pool_offset = atomic_add_return(1, &start_pool);
35523+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
35524 struct ttm_page_pool *pool;
35525 int shrink_pages = sc->nr_to_scan;
35526
35527diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
35528index 1eb060c..188b1fc 100644
35529--- a/drivers/gpu/drm/udl/udl_fb.c
35530+++ b/drivers/gpu/drm/udl/udl_fb.c
35531@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
35532 fb_deferred_io_cleanup(info);
35533 kfree(info->fbdefio);
35534 info->fbdefio = NULL;
35535- info->fbops->fb_mmap = udl_fb_mmap;
35536 }
35537
35538 pr_warn("released /dev/fb%d user=%d count=%d\n",
35539diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
35540index 893a650..6190d3b 100644
35541--- a/drivers/gpu/drm/via/via_drv.h
35542+++ b/drivers/gpu/drm/via/via_drv.h
35543@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
35544 typedef uint32_t maskarray_t[5];
35545
35546 typedef struct drm_via_irq {
35547- atomic_t irq_received;
35548+ atomic_unchecked_t irq_received;
35549 uint32_t pending_mask;
35550 uint32_t enable_mask;
35551 wait_queue_head_t irq_queue;
35552@@ -75,7 +75,7 @@ typedef struct drm_via_private {
35553 struct timeval last_vblank;
35554 int last_vblank_valid;
35555 unsigned usec_per_vblank;
35556- atomic_t vbl_received;
35557+ atomic_unchecked_t vbl_received;
35558 drm_via_state_t hc_state;
35559 char pci_buf[VIA_PCI_BUF_SIZE];
35560 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
35561diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
35562index ac98964..5dbf512 100644
35563--- a/drivers/gpu/drm/via/via_irq.c
35564+++ b/drivers/gpu/drm/via/via_irq.c
35565@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
35566 if (crtc != 0)
35567 return 0;
35568
35569- return atomic_read(&dev_priv->vbl_received);
35570+ return atomic_read_unchecked(&dev_priv->vbl_received);
35571 }
35572
35573 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35574@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35575
35576 status = VIA_READ(VIA_REG_INTERRUPT);
35577 if (status & VIA_IRQ_VBLANK_PENDING) {
35578- atomic_inc(&dev_priv->vbl_received);
35579- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
35580+ atomic_inc_unchecked(&dev_priv->vbl_received);
35581+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
35582 do_gettimeofday(&cur_vblank);
35583 if (dev_priv->last_vblank_valid) {
35584 dev_priv->usec_per_vblank =
35585@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35586 dev_priv->last_vblank = cur_vblank;
35587 dev_priv->last_vblank_valid = 1;
35588 }
35589- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
35590+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
35591 DRM_DEBUG("US per vblank is: %u\n",
35592 dev_priv->usec_per_vblank);
35593 }
35594@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
35595
35596 for (i = 0; i < dev_priv->num_irqs; ++i) {
35597 if (status & cur_irq->pending_mask) {
35598- atomic_inc(&cur_irq->irq_received);
35599+ atomic_inc_unchecked(&cur_irq->irq_received);
35600 DRM_WAKEUP(&cur_irq->irq_queue);
35601 handled = 1;
35602 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
35603@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
35604 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
35605 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
35606 masks[irq][4]));
35607- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
35608+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
35609 } else {
35610 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
35611 (((cur_irq_sequence =
35612- atomic_read(&cur_irq->irq_received)) -
35613+ atomic_read_unchecked(&cur_irq->irq_received)) -
35614 *sequence) <= (1 << 23)));
35615 }
35616 *sequence = cur_irq_sequence;
35617@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
35618 }
35619
35620 for (i = 0; i < dev_priv->num_irqs; ++i) {
35621- atomic_set(&cur_irq->irq_received, 0);
35622+ atomic_set_unchecked(&cur_irq->irq_received, 0);
35623 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
35624 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
35625 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
35626@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
35627 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
35628 case VIA_IRQ_RELATIVE:
35629 irqwait->request.sequence +=
35630- atomic_read(&cur_irq->irq_received);
35631+ atomic_read_unchecked(&cur_irq->irq_received);
35632 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
35633 case VIA_IRQ_ABSOLUTE:
35634 break;
35635diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
35636index 13aeda7..4a952d1 100644
35637--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
35638+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
35639@@ -290,7 +290,7 @@ struct vmw_private {
35640 * Fencing and IRQs.
35641 */
35642
35643- atomic_t marker_seq;
35644+ atomic_unchecked_t marker_seq;
35645 wait_queue_head_t fence_queue;
35646 wait_queue_head_t fifo_queue;
35647 int fence_queue_waiters; /* Protected by hw_mutex */
35648diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
35649index 3eb1486..0a47ee9 100644
35650--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
35651+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
35652@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
35653 (unsigned int) min,
35654 (unsigned int) fifo->capabilities);
35655
35656- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
35657+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
35658 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
35659 vmw_marker_queue_init(&fifo->marker_queue);
35660 return vmw_fifo_send_fence(dev_priv, &dummy);
35661@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
35662 if (reserveable)
35663 iowrite32(bytes, fifo_mem +
35664 SVGA_FIFO_RESERVED);
35665- return fifo_mem + (next_cmd >> 2);
35666+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
35667 } else {
35668 need_bounce = true;
35669 }
35670@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
35671
35672 fm = vmw_fifo_reserve(dev_priv, bytes);
35673 if (unlikely(fm == NULL)) {
35674- *seqno = atomic_read(&dev_priv->marker_seq);
35675+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
35676 ret = -ENOMEM;
35677 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
35678 false, 3*HZ);
35679@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
35680 }
35681
35682 do {
35683- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
35684+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
35685 } while (*seqno == 0);
35686
35687 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
35688diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
35689index 4640adb..e1384ed 100644
35690--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
35691+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
35692@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
35693 * emitted. Then the fence is stale and signaled.
35694 */
35695
35696- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
35697+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
35698 > VMW_FENCE_WRAP);
35699
35700 return ret;
35701@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
35702
35703 if (fifo_idle)
35704 down_read(&fifo_state->rwsem);
35705- signal_seq = atomic_read(&dev_priv->marker_seq);
35706+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
35707 ret = 0;
35708
35709 for (;;) {
35710diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
35711index 8a8725c..afed796 100644
35712--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
35713+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
35714@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
35715 while (!vmw_lag_lt(queue, us)) {
35716 spin_lock(&queue->lock);
35717 if (list_empty(&queue->head))
35718- seqno = atomic_read(&dev_priv->marker_seq);
35719+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
35720 else {
35721 marker = list_first_entry(&queue->head,
35722 struct vmw_marker, head);
35723diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
35724index ceb3040..6160c5c 100644
35725--- a/drivers/hid/hid-core.c
35726+++ b/drivers/hid/hid-core.c
35727@@ -2242,7 +2242,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
35728
35729 int hid_add_device(struct hid_device *hdev)
35730 {
35731- static atomic_t id = ATOMIC_INIT(0);
35732+ static atomic_unchecked_t id = ATOMIC_INIT(0);
35733 int ret;
35734
35735 if (WARN_ON(hdev->status & HID_STAT_ADDED))
35736@@ -2276,7 +2276,7 @@ int hid_add_device(struct hid_device *hdev)
35737 /* XXX hack, any other cleaner solution after the driver core
35738 * is converted to allow more than 20 bytes as the device name? */
35739 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
35740- hdev->vendor, hdev->product, atomic_inc_return(&id));
35741+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
35742
35743 hid_debug_register(hdev, dev_name(&hdev->dev));
35744 ret = device_add(&hdev->dev);
35745diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
35746index eec3291..8ed706b 100644
35747--- a/drivers/hid/hid-wiimote-debug.c
35748+++ b/drivers/hid/hid-wiimote-debug.c
35749@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
35750 else if (size == 0)
35751 return -EIO;
35752
35753- if (copy_to_user(u, buf, size))
35754+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
35755 return -EFAULT;
35756
35757 *off += size;
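
[The wiimote debugfs read gains a size > sizeof(buf) guard in front of copy_to_user(), so a larger-than-expected size reported by the EEPROM layer can no longer walk off the end of the local buffer. The generic bounds-checked copy-out pattern, with demo_fill() as a hypothetical data source:

	static ssize_t demo_fill(char *dst, size_t cap, size_t want);	/* hypothetical */

	static ssize_t demo_read(char __user *u, size_t want)
	{
		char buf[16];
		ssize_t size = demo_fill(buf, sizeof(buf), want);

		if (size <= 0)
			return size;
		if (size > sizeof(buf) || copy_to_user(u, buf, size))
			return -EFAULT;
		return size;
	}
]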
35758diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
35759index 773a2f2..7ce08bc 100644
35760--- a/drivers/hv/channel.c
35761+++ b/drivers/hv/channel.c
35762@@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
35763 int ret = 0;
35764 int t;
35765
35766- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
35767- atomic_inc(&vmbus_connection.next_gpadl_handle);
35768+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
35769+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
35770
35771 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
35772 if (ret)
35773diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
35774index 3648f8f..30ef30d 100644
35775--- a/drivers/hv/hv.c
35776+++ b/drivers/hv/hv.c
35777@@ -111,7 +111,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
35778 u64 output_address = (output) ? virt_to_phys(output) : 0;
35779 u32 output_address_hi = output_address >> 32;
35780 u32 output_address_lo = output_address & 0xFFFFFFFF;
35781- void *hypercall_page = hv_context.hypercall_page;
35782+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
35783
35784 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
35785 "=a"(hv_status_lo) : "d" (control_hi),
35786diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
35787index d8d1fad..b91caf7 100644
35788--- a/drivers/hv/hyperv_vmbus.h
35789+++ b/drivers/hv/hyperv_vmbus.h
35790@@ -594,7 +594,7 @@ enum vmbus_connect_state {
35791 struct vmbus_connection {
35792 enum vmbus_connect_state conn_state;
35793
35794- atomic_t next_gpadl_handle;
35795+ atomic_unchecked_t next_gpadl_handle;
35796
35797 /*
35798 * Represents channel interrupts. Each bit position represents a
35799diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
35800index 8e1a9ec..4687821 100644
35801--- a/drivers/hv/vmbus_drv.c
35802+++ b/drivers/hv/vmbus_drv.c
35803@@ -629,10 +629,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
35804 {
35805 int ret = 0;
35806
35807- static atomic_t device_num = ATOMIC_INIT(0);
35808+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
35809
35810 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
35811- atomic_inc_return(&device_num));
35812+ atomic_inc_return_unchecked(&device_num));
35813
35814 child_device_obj->device.bus = &hv_bus;
35815 child_device_obj->device.parent = &hv_acpi_dev->dev;
35816diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
35817index 1672e2a..4a6297c 100644
35818--- a/drivers/hwmon/acpi_power_meter.c
35819+++ b/drivers/hwmon/acpi_power_meter.c
35820@@ -117,7 +117,7 @@ struct sensor_template {
35821 struct device_attribute *devattr,
35822 const char *buf, size_t count);
35823 int index;
35824-};
35825+} __do_const;
35826
35827 /* Averaging interval */
35828 static int update_avg_interval(struct acpi_power_meter_resource *resource)
35829@@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
35830 struct sensor_template *attrs)
35831 {
35832 struct device *dev = &resource->acpi_dev->dev;
35833- struct sensor_device_attribute *sensors =
35834+ sensor_device_attribute_no_const *sensors =
35835 &resource->sensors[resource->num_sensors];
35836 int res = 0;
35837
35838diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
35839index b41baff..4953e4d 100644
35840--- a/drivers/hwmon/applesmc.c
35841+++ b/drivers/hwmon/applesmc.c
35842@@ -1084,7 +1084,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
35843 {
35844 struct applesmc_node_group *grp;
35845 struct applesmc_dev_attr *node;
35846- struct attribute *attr;
35847+ attribute_no_const *attr;
35848 int ret, i;
35849
35850 for (grp = groups; grp->format; grp++) {
35851diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
35852index 56dbcfb..9874bf1 100644
35853--- a/drivers/hwmon/asus_atk0110.c
35854+++ b/drivers/hwmon/asus_atk0110.c
35855@@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
35856 struct atk_sensor_data {
35857 struct list_head list;
35858 struct atk_data *data;
35859- struct device_attribute label_attr;
35860- struct device_attribute input_attr;
35861- struct device_attribute limit1_attr;
35862- struct device_attribute limit2_attr;
35863+ device_attribute_no_const label_attr;
35864+ device_attribute_no_const input_attr;
35865+ device_attribute_no_const limit1_attr;
35866+ device_attribute_no_const limit2_attr;
35867 char label_attr_name[ATTR_NAME_SIZE];
35868 char input_attr_name[ATTR_NAME_SIZE];
35869 char limit1_attr_name[ATTR_NAME_SIZE];
35870@@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
35871 static struct device_attribute atk_name_attr =
35872 __ATTR(name, 0444, atk_name_show, NULL);
35873
35874-static void atk_init_attribute(struct device_attribute *attr, char *name,
35875+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
35876 sysfs_show_func show)
35877 {
35878 sysfs_attr_init(&attr->attr);
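
[sensor_device_attribute_no_const, device_attribute_no_const and attribute_no_const in the hwmon hunks above are presumably typedefs added elsewhere in the patch: the same struct, re-spelled with the __no_const annotation seen earlier in nouveau_bios.c, so the constify plugin leaves these few runtime-built attribute objects writable while every statically defined instance stays const. The convention, sketched:

	#ifndef __no_const
	#define __no_const	/* plugin attribute in the real tree */
	#endif

	struct demo_ops {			/* all function pointers:     */
		ssize_t (*show)(char *);	/* would be auto-constified   */
		ssize_t (*store)(const char *);	/* by the plugin...           */
	};

	/* ...so runtime-initialized instances use a __no_const twin type */
	typedef struct demo_ops __no_const demo_ops_no_const;

	static demo_ops_no_const runtime_ops;	/* filled in at probe time */
]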
35879diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
35880index d64923d..72591e8 100644
35881--- a/drivers/hwmon/coretemp.c
35882+++ b/drivers/hwmon/coretemp.c
35883@@ -790,7 +790,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
35884 return NOTIFY_OK;
35885 }
35886
35887-static struct notifier_block coretemp_cpu_notifier __refdata = {
35888+static struct notifier_block coretemp_cpu_notifier = {
35889 .notifier_call = coretemp_cpu_callback,
35890 };
35891
35892diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
35893index a14f634..2916ee2 100644
35894--- a/drivers/hwmon/ibmaem.c
35895+++ b/drivers/hwmon/ibmaem.c
35896@@ -925,7 +925,7 @@ static int aem_register_sensors(struct aem_data *data,
35897 struct aem_rw_sensor_template *rw)
35898 {
35899 struct device *dev = &data->pdev->dev;
35900- struct sensor_device_attribute *sensors = data->sensors;
35901+ sensor_device_attribute_no_const *sensors = data->sensors;
35902 int err;
35903
35904 /* Set up read-only sensors */
35905diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
35906index 7d19b1b..8fdaaac 100644
35907--- a/drivers/hwmon/pmbus/pmbus_core.c
35908+++ b/drivers/hwmon/pmbus/pmbus_core.c
35909@@ -811,7 +811,7 @@ static ssize_t pmbus_show_label(struct device *dev,
35910
35911 #define PMBUS_ADD_ATTR(data, _name, _idx, _mode, _type, _show, _set) \
35912 do { \
35913- struct sensor_device_attribute *a \
35914+ sensor_device_attribute_no_const *a \
35915 = &data->_type##s[data->num_##_type##s].attribute; \
35916 BUG_ON(data->num_attributes >= data->max_attributes); \
35917 sysfs_attr_init(&a->dev_attr.attr); \
35918diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
35919index 1c85d39..55ed3cf 100644
35920--- a/drivers/hwmon/sht15.c
35921+++ b/drivers/hwmon/sht15.c
35922@@ -169,7 +169,7 @@ struct sht15_data {
35923 int supply_uV;
35924 bool supply_uV_valid;
35925 struct work_struct update_supply_work;
35926- atomic_t interrupt_handled;
35927+ atomic_unchecked_t interrupt_handled;
35928 };
35929
35930 /**
35931@@ -512,13 +512,13 @@ static int sht15_measurement(struct sht15_data *data,
35932 return ret;
35933
35934 gpio_direction_input(data->pdata->gpio_data);
35935- atomic_set(&data->interrupt_handled, 0);
35936+ atomic_set_unchecked(&data->interrupt_handled, 0);
35937
35938 enable_irq(gpio_to_irq(data->pdata->gpio_data));
35939 if (gpio_get_value(data->pdata->gpio_data) == 0) {
35940 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
35941 /* Only relevant if the interrupt hasn't occurred. */
35942- if (!atomic_read(&data->interrupt_handled))
35943+ if (!atomic_read_unchecked(&data->interrupt_handled))
35944 schedule_work(&data->read_work);
35945 }
35946 ret = wait_event_timeout(data->wait_queue,
35947@@ -785,7 +785,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
35948
35949 /* First disable the interrupt */
35950 disable_irq_nosync(irq);
35951- atomic_inc(&data->interrupt_handled);
35952+ atomic_inc_unchecked(&data->interrupt_handled);
35953 /* Then schedule a reading work struct */
35954 if (data->state != SHT15_READING_NOTHING)
35955 schedule_work(&data->read_work);
35956@@ -807,11 +807,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
35957 * If not, then start the interrupt again - care here as could
35958 * have gone low in meantime so verify it hasn't!
35959 */
35960- atomic_set(&data->interrupt_handled, 0);
35961+ atomic_set_unchecked(&data->interrupt_handled, 0);
35962 enable_irq(gpio_to_irq(data->pdata->gpio_data));
35963 /* If still not occurred or another handler was scheduled */
35964 if (gpio_get_value(data->pdata->gpio_data)
35965- || atomic_read(&data->interrupt_handled))
35966+ || atomic_read_unchecked(&data->interrupt_handled))
35967 return;
35968 }
35969
35970diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
35971index 76f157b..9c0db1b 100644
35972--- a/drivers/hwmon/via-cputemp.c
35973+++ b/drivers/hwmon/via-cputemp.c
35974@@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
35975 return NOTIFY_OK;
35976 }
35977
35978-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
35979+static struct notifier_block via_cputemp_cpu_notifier = {
35980 .notifier_call = via_cputemp_cpu_callback,
35981 };
35982
35983diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
35984index 378fcb5..5e91fa8 100644
35985--- a/drivers/i2c/busses/i2c-amd756-s4882.c
35986+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
35987@@ -43,7 +43,7 @@
35988 extern struct i2c_adapter amd756_smbus;
35989
35990 static struct i2c_adapter *s4882_adapter;
35991-static struct i2c_algorithm *s4882_algo;
35992+static i2c_algorithm_no_const *s4882_algo;
35993
35994 /* Wrapper access functions for multiplexed SMBus */
35995 static DEFINE_MUTEX(amd756_lock);
35996diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
35997index 29015eb..af2d8e9 100644
35998--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
35999+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
36000@@ -41,7 +41,7 @@
36001 extern struct i2c_adapter *nforce2_smbus;
36002
36003 static struct i2c_adapter *s4985_adapter;
36004-static struct i2c_algorithm *s4985_algo;
36005+static i2c_algorithm_no_const *s4985_algo;
36006
36007 /* Wrapper access functions for multiplexed SMBus */
36008 static DEFINE_MUTEX(nforce2_lock);
36009diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
36010index 8126824..55a2798 100644
36011--- a/drivers/ide/ide-cd.c
36012+++ b/drivers/ide/ide-cd.c
36013@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
36014 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
36015 if ((unsigned long)buf & alignment
36016 || blk_rq_bytes(rq) & q->dma_pad_mask
36017- || object_is_on_stack(buf))
36018+ || object_starts_on_stack(buf))
36019 drive->dma = 0;
36020 }
36021 }
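
[ide-cd falls back to PIO when the request buffer lives on the stack, since stack pages may not be DMA-safe. object_starts_on_stack() appears to be grsecurity's const-clean variant of object_is_on_stack(); either way the test reduces to a range check against the current task's stack, roughly:

	static inline int demo_on_stack(const void *obj)
	{
		const void *stack = task_stack_page(current);

		return obj >= stack && obj < stack + THREAD_SIZE;
	}
]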
36022diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
36023index 8848f16..f8e6dd8 100644
36024--- a/drivers/iio/industrialio-core.c
36025+++ b/drivers/iio/industrialio-core.c
36026@@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
36027 }
36028
36029 static
36030-int __iio_device_attr_init(struct device_attribute *dev_attr,
36031+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
36032 const char *postfix,
36033 struct iio_chan_spec const *chan,
36034 ssize_t (*readfunc)(struct device *dev,
36035diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
36036index 394fea2..c833880 100644
36037--- a/drivers/infiniband/core/cm.c
36038+++ b/drivers/infiniband/core/cm.c
36039@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
36040
36041 struct cm_counter_group {
36042 struct kobject obj;
36043- atomic_long_t counter[CM_ATTR_COUNT];
36044+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
36045 };
36046
36047 struct cm_counter_attribute {
36048@@ -1394,7 +1394,7 @@ static void cm_dup_req_handler(struct cm_work *work,
36049 struct ib_mad_send_buf *msg = NULL;
36050 int ret;
36051
36052- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36053+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36054 counter[CM_REQ_COUNTER]);
36055
36056 /* Quick state check to discard duplicate REQs. */
36057@@ -1778,7 +1778,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
36058 if (!cm_id_priv)
36059 return;
36060
36061- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36062+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36063 counter[CM_REP_COUNTER]);
36064 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
36065 if (ret)
36066@@ -1945,7 +1945,7 @@ static int cm_rtu_handler(struct cm_work *work)
36067 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
36068 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
36069 spin_unlock_irq(&cm_id_priv->lock);
36070- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36071+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36072 counter[CM_RTU_COUNTER]);
36073 goto out;
36074 }
36075@@ -2128,7 +2128,7 @@ static int cm_dreq_handler(struct cm_work *work)
36076 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
36077 dreq_msg->local_comm_id);
36078 if (!cm_id_priv) {
36079- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36080+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36081 counter[CM_DREQ_COUNTER]);
36082 cm_issue_drep(work->port, work->mad_recv_wc);
36083 return -EINVAL;
36084@@ -2153,7 +2153,7 @@ static int cm_dreq_handler(struct cm_work *work)
36085 case IB_CM_MRA_REP_RCVD:
36086 break;
36087 case IB_CM_TIMEWAIT:
36088- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36089+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36090 counter[CM_DREQ_COUNTER]);
36091 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36092 goto unlock;
36093@@ -2167,7 +2167,7 @@ static int cm_dreq_handler(struct cm_work *work)
36094 cm_free_msg(msg);
36095 goto deref;
36096 case IB_CM_DREQ_RCVD:
36097- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36098+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36099 counter[CM_DREQ_COUNTER]);
36100 goto unlock;
36101 default:
36102@@ -2534,7 +2534,7 @@ static int cm_mra_handler(struct cm_work *work)
36103 ib_modify_mad(cm_id_priv->av.port->mad_agent,
36104 cm_id_priv->msg, timeout)) {
36105 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
36106- atomic_long_inc(&work->port->
36107+ atomic_long_inc_unchecked(&work->port->
36108 counter_group[CM_RECV_DUPLICATES].
36109 counter[CM_MRA_COUNTER]);
36110 goto out;
36111@@ -2543,7 +2543,7 @@ static int cm_mra_handler(struct cm_work *work)
36112 break;
36113 case IB_CM_MRA_REQ_RCVD:
36114 case IB_CM_MRA_REP_RCVD:
36115- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36116+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36117 counter[CM_MRA_COUNTER]);
36118 /* fall through */
36119 default:
36120@@ -2705,7 +2705,7 @@ static int cm_lap_handler(struct cm_work *work)
36121 case IB_CM_LAP_IDLE:
36122 break;
36123 case IB_CM_MRA_LAP_SENT:
36124- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36125+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36126 counter[CM_LAP_COUNTER]);
36127 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
36128 goto unlock;
36129@@ -2721,7 +2721,7 @@ static int cm_lap_handler(struct cm_work *work)
36130 cm_free_msg(msg);
36131 goto deref;
36132 case IB_CM_LAP_RCVD:
36133- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36134+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36135 counter[CM_LAP_COUNTER]);
36136 goto unlock;
36137 default:
36138@@ -3005,7 +3005,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
36139 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
36140 if (cur_cm_id_priv) {
36141 spin_unlock_irq(&cm.lock);
36142- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
36143+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
36144 counter[CM_SIDR_REQ_COUNTER]);
36145 goto out; /* Duplicate message. */
36146 }
36147@@ -3217,10 +3217,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
36148 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
36149 msg->retries = 1;
36150
36151- atomic_long_add(1 + msg->retries,
36152+ atomic_long_add_unchecked(1 + msg->retries,
36153 &port->counter_group[CM_XMIT].counter[attr_index]);
36154 if (msg->retries)
36155- atomic_long_add(msg->retries,
36156+ atomic_long_add_unchecked(msg->retries,
36157 &port->counter_group[CM_XMIT_RETRIES].
36158 counter[attr_index]);
36159
36160@@ -3430,7 +3430,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
36161 }
36162
36163 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
36164- atomic_long_inc(&port->counter_group[CM_RECV].
36165+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
36166 counter[attr_id - CM_ATTR_ID_OFFSET]);
36167
36168 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
36169@@ -3635,7 +3635,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
36170 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
36171
36172 return sprintf(buf, "%ld\n",
36173- atomic_long_read(&group->counter[cm_attr->index]));
36174+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
36175 }
36176
36177 static const struct sysfs_ops cm_counter_ops = {
36178diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
36179index 176c8f9..2627b62 100644
36180--- a/drivers/infiniband/core/fmr_pool.c
36181+++ b/drivers/infiniband/core/fmr_pool.c
36182@@ -98,8 +98,8 @@ struct ib_fmr_pool {
36183
36184 struct task_struct *thread;
36185
36186- atomic_t req_ser;
36187- atomic_t flush_ser;
36188+ atomic_unchecked_t req_ser;
36189+ atomic_unchecked_t flush_ser;
36190
36191 wait_queue_head_t force_wait;
36192 };
36193@@ -180,10 +180,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36194 struct ib_fmr_pool *pool = pool_ptr;
36195
36196 do {
36197- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
36198+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
36199 ib_fmr_batch_release(pool);
36200
36201- atomic_inc(&pool->flush_ser);
36202+ atomic_inc_unchecked(&pool->flush_ser);
36203 wake_up_interruptible(&pool->force_wait);
36204
36205 if (pool->flush_function)
36206@@ -191,7 +191,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
36207 }
36208
36209 set_current_state(TASK_INTERRUPTIBLE);
36210- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
36211+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
36212 !kthread_should_stop())
36213 schedule();
36214 __set_current_state(TASK_RUNNING);
36215@@ -283,8 +283,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
36216 pool->dirty_watermark = params->dirty_watermark;
36217 pool->dirty_len = 0;
36218 spin_lock_init(&pool->pool_lock);
36219- atomic_set(&pool->req_ser, 0);
36220- atomic_set(&pool->flush_ser, 0);
36221+ atomic_set_unchecked(&pool->req_ser, 0);
36222+ atomic_set_unchecked(&pool->flush_ser, 0);
36223 init_waitqueue_head(&pool->force_wait);
36224
36225 pool->thread = kthread_run(ib_fmr_cleanup_thread,
36226@@ -412,11 +412,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
36227 }
36228 spin_unlock_irq(&pool->pool_lock);
36229
36230- serial = atomic_inc_return(&pool->req_ser);
36231+ serial = atomic_inc_return_unchecked(&pool->req_ser);
36232 wake_up_process(pool->thread);
36233
36234 if (wait_event_interruptible(pool->force_wait,
36235- atomic_read(&pool->flush_ser) - serial >= 0))
36236+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
36237 return -EINTR;
36238
36239 return 0;
36240@@ -526,7 +526,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
36241 } else {
36242 list_add_tail(&fmr->list, &pool->dirty_list);
36243 if (++pool->dirty_len >= pool->dirty_watermark) {
36244- atomic_inc(&pool->req_ser);
36245+ atomic_inc_unchecked(&pool->req_ser);
36246 wake_up_process(pool->thread);
36247 }
36248 }
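
[req_ser and flush_ser form a wrapping sequence-number pair: every comparison in this file is written as a subtraction whose sign decides which serial is ahead, so wraparound is by design, and under REFCOUNT such counters must become _unchecked or the overflow detector would trip on a legitimate wrap. The comparison idiom, isolated:

	/* which serial is ahead; valid while they stay less than 2^31 apart */
	static inline int serial_before(unsigned int a, unsigned int b)
	{
		return (int)(a - b) < 0;	/* true if a is behind b */
	}
]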
36249diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
36250index afd8179..598063f 100644
36251--- a/drivers/infiniband/hw/cxgb4/mem.c
36252+++ b/drivers/infiniband/hw/cxgb4/mem.c
36253@@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
36254 int err;
36255 struct fw_ri_tpte tpt;
36256 u32 stag_idx;
36257- static atomic_t key;
36258+ static atomic_unchecked_t key;
36259
36260 if (c4iw_fatal_error(rdev))
36261 return -EIO;
36262@@ -139,7 +139,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
36263 if (rdev->stats.stag.cur > rdev->stats.stag.max)
36264 rdev->stats.stag.max = rdev->stats.stag.cur;
36265 mutex_unlock(&rdev->stats.lock);
36266- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
36267+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
36268 }
36269 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
36270 __func__, stag_state, type, pdid, stag_idx);
36271diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
36272index 79b3dbc..96e5fcc 100644
36273--- a/drivers/infiniband/hw/ipath/ipath_rc.c
36274+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
36275@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
36276 struct ib_atomic_eth *ateth;
36277 struct ipath_ack_entry *e;
36278 u64 vaddr;
36279- atomic64_t *maddr;
36280+ atomic64_unchecked_t *maddr;
36281 u64 sdata;
36282 u32 rkey;
36283 u8 next;
36284@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
36285 IB_ACCESS_REMOTE_ATOMIC)))
36286 goto nack_acc_unlck;
36287 /* Perform atomic OP and save result. */
36288- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
36289+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
36290 sdata = be64_to_cpu(ateth->swap_data);
36291 e = &qp->s_ack_queue[qp->r_head_ack_queue];
36292 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
36293- (u64) atomic64_add_return(sdata, maddr) - sdata :
36294+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
36295 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
36296 be64_to_cpu(ateth->compare_data),
36297 sdata);
36298diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
36299index 1f95bba..9530f87 100644
36300--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
36301+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
36302@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
36303 unsigned long flags;
36304 struct ib_wc wc;
36305 u64 sdata;
36306- atomic64_t *maddr;
36307+ atomic64_unchecked_t *maddr;
36308 enum ib_wc_status send_status;
36309
36310 /*
36311@@ -382,11 +382,11 @@ again:
36312 IB_ACCESS_REMOTE_ATOMIC)))
36313 goto acc_err;
36314 /* Perform atomic OP and save result. */
36315- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
36316+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
36317 sdata = wqe->wr.wr.atomic.compare_add;
36318 *(u64 *) sqp->s_sge.sge.vaddr =
36319 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
36320- (u64) atomic64_add_return(sdata, maddr) - sdata :
36321+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
36322 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
36323 sdata, wqe->wr.wr.atomic.swap);
36324 goto send_comp;
36325diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
36326index 5b152a3..c1f3e83 100644
36327--- a/drivers/infiniband/hw/nes/nes.c
36328+++ b/drivers/infiniband/hw/nes/nes.c
36329@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
36330 LIST_HEAD(nes_adapter_list);
36331 static LIST_HEAD(nes_dev_list);
36332
36333-atomic_t qps_destroyed;
36334+atomic_unchecked_t qps_destroyed;
36335
36336 static unsigned int ee_flsh_adapter;
36337 static unsigned int sysfs_nonidx_addr;
36338@@ -267,7 +267,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
36339 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
36340 struct nes_adapter *nesadapter = nesdev->nesadapter;
36341
36342- atomic_inc(&qps_destroyed);
36343+ atomic_inc_unchecked(&qps_destroyed);
36344
36345 /* Free the control structures */
36346
36347diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
36348index 33cc589..3bd6538 100644
36349--- a/drivers/infiniband/hw/nes/nes.h
36350+++ b/drivers/infiniband/hw/nes/nes.h
36351@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
36352 extern unsigned int wqm_quanta;
36353 extern struct list_head nes_adapter_list;
36354
36355-extern atomic_t cm_connects;
36356-extern atomic_t cm_accepts;
36357-extern atomic_t cm_disconnects;
36358-extern atomic_t cm_closes;
36359-extern atomic_t cm_connecteds;
36360-extern atomic_t cm_connect_reqs;
36361-extern atomic_t cm_rejects;
36362-extern atomic_t mod_qp_timouts;
36363-extern atomic_t qps_created;
36364-extern atomic_t qps_destroyed;
36365-extern atomic_t sw_qps_destroyed;
36366+extern atomic_unchecked_t cm_connects;
36367+extern atomic_unchecked_t cm_accepts;
36368+extern atomic_unchecked_t cm_disconnects;
36369+extern atomic_unchecked_t cm_closes;
36370+extern atomic_unchecked_t cm_connecteds;
36371+extern atomic_unchecked_t cm_connect_reqs;
36372+extern atomic_unchecked_t cm_rejects;
36373+extern atomic_unchecked_t mod_qp_timouts;
36374+extern atomic_unchecked_t qps_created;
36375+extern atomic_unchecked_t qps_destroyed;
36376+extern atomic_unchecked_t sw_qps_destroyed;
36377 extern u32 mh_detected;
36378 extern u32 mh_pauses_sent;
36379 extern u32 cm_packets_sent;
36380@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
36381 extern u32 cm_packets_received;
36382 extern u32 cm_packets_dropped;
36383 extern u32 cm_packets_retrans;
36384-extern atomic_t cm_listens_created;
36385-extern atomic_t cm_listens_destroyed;
36386+extern atomic_unchecked_t cm_listens_created;
36387+extern atomic_unchecked_t cm_listens_destroyed;
36388 extern u32 cm_backlog_drops;
36389-extern atomic_t cm_loopbacks;
36390-extern atomic_t cm_nodes_created;
36391-extern atomic_t cm_nodes_destroyed;
36392-extern atomic_t cm_accel_dropped_pkts;
36393-extern atomic_t cm_resets_recvd;
36394-extern atomic_t pau_qps_created;
36395-extern atomic_t pau_qps_destroyed;
36396+extern atomic_unchecked_t cm_loopbacks;
36397+extern atomic_unchecked_t cm_nodes_created;
36398+extern atomic_unchecked_t cm_nodes_destroyed;
36399+extern atomic_unchecked_t cm_accel_dropped_pkts;
36400+extern atomic_unchecked_t cm_resets_recvd;
36401+extern atomic_unchecked_t pau_qps_created;
36402+extern atomic_unchecked_t pau_qps_destroyed;
36403
36404 extern u32 int_mod_timer_init;
36405 extern u32 int_mod_cq_depth_256;
36406diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
36407index 22ea67e..dcbe3bc 100644
36408--- a/drivers/infiniband/hw/nes/nes_cm.c
36409+++ b/drivers/infiniband/hw/nes/nes_cm.c
36410@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
36411 u32 cm_packets_retrans;
36412 u32 cm_packets_created;
36413 u32 cm_packets_received;
36414-atomic_t cm_listens_created;
36415-atomic_t cm_listens_destroyed;
36416+atomic_unchecked_t cm_listens_created;
36417+atomic_unchecked_t cm_listens_destroyed;
36418 u32 cm_backlog_drops;
36419-atomic_t cm_loopbacks;
36420-atomic_t cm_nodes_created;
36421-atomic_t cm_nodes_destroyed;
36422-atomic_t cm_accel_dropped_pkts;
36423-atomic_t cm_resets_recvd;
36424+atomic_unchecked_t cm_loopbacks;
36425+atomic_unchecked_t cm_nodes_created;
36426+atomic_unchecked_t cm_nodes_destroyed;
36427+atomic_unchecked_t cm_accel_dropped_pkts;
36428+atomic_unchecked_t cm_resets_recvd;
36429
36430 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
36431 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
36432@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
36433
36434 static struct nes_cm_core *g_cm_core;
36435
36436-atomic_t cm_connects;
36437-atomic_t cm_accepts;
36438-atomic_t cm_disconnects;
36439-atomic_t cm_closes;
36440-atomic_t cm_connecteds;
36441-atomic_t cm_connect_reqs;
36442-atomic_t cm_rejects;
36443+atomic_unchecked_t cm_connects;
36444+atomic_unchecked_t cm_accepts;
36445+atomic_unchecked_t cm_disconnects;
36446+atomic_unchecked_t cm_closes;
36447+atomic_unchecked_t cm_connecteds;
36448+atomic_unchecked_t cm_connect_reqs;
36449+atomic_unchecked_t cm_rejects;
36450
36451 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
36452 {
36453@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
36454 kfree(listener);
36455 listener = NULL;
36456 ret = 0;
36457- atomic_inc(&cm_listens_destroyed);
36458+ atomic_inc_unchecked(&cm_listens_destroyed);
36459 } else {
36460 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
36461 }
36462@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
36463 cm_node->rem_mac);
36464
36465 add_hte_node(cm_core, cm_node);
36466- atomic_inc(&cm_nodes_created);
36467+ atomic_inc_unchecked(&cm_nodes_created);
36468
36469 return cm_node;
36470 }
36471@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
36472 }
36473
36474 atomic_dec(&cm_core->node_cnt);
36475- atomic_inc(&cm_nodes_destroyed);
36476+ atomic_inc_unchecked(&cm_nodes_destroyed);
36477 nesqp = cm_node->nesqp;
36478 if (nesqp) {
36479 nesqp->cm_node = NULL;
36480@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
36481
36482 static void drop_packet(struct sk_buff *skb)
36483 {
36484- atomic_inc(&cm_accel_dropped_pkts);
36485+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
36486 dev_kfree_skb_any(skb);
36487 }
36488
36489@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
36490 {
36491
36492 int reset = 0; /* whether to send reset in case of err.. */
36493- atomic_inc(&cm_resets_recvd);
36494+ atomic_inc_unchecked(&cm_resets_recvd);
36495 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
36496 " refcnt=%d\n", cm_node, cm_node->state,
36497 atomic_read(&cm_node->ref_count));
36498@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
36499 rem_ref_cm_node(cm_node->cm_core, cm_node);
36500 return NULL;
36501 }
36502- atomic_inc(&cm_loopbacks);
36503+ atomic_inc_unchecked(&cm_loopbacks);
36504 loopbackremotenode->loopbackpartner = cm_node;
36505 loopbackremotenode->tcp_cntxt.rcv_wscale =
36506 NES_CM_DEFAULT_RCV_WND_SCALE;
36507@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
36508 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
36509 else {
36510 rem_ref_cm_node(cm_core, cm_node);
36511- atomic_inc(&cm_accel_dropped_pkts);
36512+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
36513 dev_kfree_skb_any(skb);
36514 }
36515 break;
36516@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
36517
36518 if ((cm_id) && (cm_id->event_handler)) {
36519 if (issue_disconn) {
36520- atomic_inc(&cm_disconnects);
36521+ atomic_inc_unchecked(&cm_disconnects);
36522 cm_event.event = IW_CM_EVENT_DISCONNECT;
36523 cm_event.status = disconn_status;
36524 cm_event.local_addr = cm_id->local_addr;
36525@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
36526 }
36527
36528 if (issue_close) {
36529- atomic_inc(&cm_closes);
36530+ atomic_inc_unchecked(&cm_closes);
36531 nes_disconnect(nesqp, 1);
36532
36533 cm_id->provider_data = nesqp;
36534@@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
36535
36536 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
36537 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
36538- atomic_inc(&cm_accepts);
36539+ atomic_inc_unchecked(&cm_accepts);
36540
36541 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
36542 netdev_refcnt_read(nesvnic->netdev));
36543@@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
36544 struct nes_cm_core *cm_core;
36545 u8 *start_buff;
36546
36547- atomic_inc(&cm_rejects);
36548+ atomic_inc_unchecked(&cm_rejects);
36549 cm_node = (struct nes_cm_node *)cm_id->provider_data;
36550 loopback = cm_node->loopbackpartner;
36551 cm_core = cm_node->cm_core;
36552@@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
36553 ntohl(cm_id->local_addr.sin_addr.s_addr),
36554 ntohs(cm_id->local_addr.sin_port));
36555
36556- atomic_inc(&cm_connects);
36557+ atomic_inc_unchecked(&cm_connects);
36558 nesqp->active_conn = 1;
36559
36560 /* cache the cm_id in the qp */
36561@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
36562 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
36563 return err;
36564 }
36565- atomic_inc(&cm_listens_created);
36566+ atomic_inc_unchecked(&cm_listens_created);
36567 }
36568
36569 cm_id->add_ref(cm_id);
36570@@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
36571
36572 if (nesqp->destroyed)
36573 return;
36574- atomic_inc(&cm_connecteds);
36575+ atomic_inc_unchecked(&cm_connecteds);
36576 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
36577 " local port 0x%04X. jiffies = %lu.\n",
36578 nesqp->hwqp.qp_id,
36579@@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
36580
36581 cm_id->add_ref(cm_id);
36582 ret = cm_id->event_handler(cm_id, &cm_event);
36583- atomic_inc(&cm_closes);
36584+ atomic_inc_unchecked(&cm_closes);
36585 cm_event.event = IW_CM_EVENT_CLOSE;
36586 cm_event.status = 0;
36587 cm_event.provider_data = cm_id->provider_data;
36588@@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
36589 return;
36590 cm_id = cm_node->cm_id;
36591
36592- atomic_inc(&cm_connect_reqs);
36593+ atomic_inc_unchecked(&cm_connect_reqs);
36594 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36595 cm_node, cm_id, jiffies);
36596
36597@@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
36598 return;
36599 cm_id = cm_node->cm_id;
36600
36601- atomic_inc(&cm_connect_reqs);
36602+ atomic_inc_unchecked(&cm_connect_reqs);
36603 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
36604 cm_node, cm_id, jiffies);
36605
36606diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
36607index 4166452..fc952c3 100644
36608--- a/drivers/infiniband/hw/nes/nes_mgt.c
36609+++ b/drivers/infiniband/hw/nes/nes_mgt.c
36610@@ -40,8 +40,8 @@
36611 #include "nes.h"
36612 #include "nes_mgt.h"
36613
36614-atomic_t pau_qps_created;
36615-atomic_t pau_qps_destroyed;
36616+atomic_unchecked_t pau_qps_created;
36617+atomic_unchecked_t pau_qps_destroyed;
36618
36619 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
36620 {
36621@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
36622 {
36623 struct sk_buff *skb;
36624 unsigned long flags;
36625- atomic_inc(&pau_qps_destroyed);
36626+ atomic_inc_unchecked(&pau_qps_destroyed);
36627
36628 /* Free packets that have not yet been forwarded */
36629 /* Lock is acquired by skb_dequeue when removing the skb */
36630@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
36631 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
36632 skb_queue_head_init(&nesqp->pau_list);
36633 spin_lock_init(&nesqp->pau_lock);
36634- atomic_inc(&pau_qps_created);
36635+ atomic_inc_unchecked(&pau_qps_created);
36636 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
36637 }
36638
36639diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
36640index 9542e16..a008c40 100644
36641--- a/drivers/infiniband/hw/nes/nes_nic.c
36642+++ b/drivers/infiniband/hw/nes/nes_nic.c
36643@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
36644 target_stat_values[++index] = mh_detected;
36645 target_stat_values[++index] = mh_pauses_sent;
36646 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
36647- target_stat_values[++index] = atomic_read(&cm_connects);
36648- target_stat_values[++index] = atomic_read(&cm_accepts);
36649- target_stat_values[++index] = atomic_read(&cm_disconnects);
36650- target_stat_values[++index] = atomic_read(&cm_connecteds);
36651- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
36652- target_stat_values[++index] = atomic_read(&cm_rejects);
36653- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
36654- target_stat_values[++index] = atomic_read(&qps_created);
36655- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
36656- target_stat_values[++index] = atomic_read(&qps_destroyed);
36657- target_stat_values[++index] = atomic_read(&cm_closes);
36658+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
36659+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
36660+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
36661+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
36662+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
36663+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
36664+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
36665+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
36666+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
36667+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
36668+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
36669 target_stat_values[++index] = cm_packets_sent;
36670 target_stat_values[++index] = cm_packets_bounced;
36671 target_stat_values[++index] = cm_packets_created;
36672 target_stat_values[++index] = cm_packets_received;
36673 target_stat_values[++index] = cm_packets_dropped;
36674 target_stat_values[++index] = cm_packets_retrans;
36675- target_stat_values[++index] = atomic_read(&cm_listens_created);
36676- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
36677+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
36678+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
36679 target_stat_values[++index] = cm_backlog_drops;
36680- target_stat_values[++index] = atomic_read(&cm_loopbacks);
36681- target_stat_values[++index] = atomic_read(&cm_nodes_created);
36682- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
36683- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
36684- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
36685+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
36686+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
36687+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
36688+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
36689+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
36690 target_stat_values[++index] = nesadapter->free_4kpbl;
36691 target_stat_values[++index] = nesadapter->free_256pbl;
36692 target_stat_values[++index] = int_mod_timer_init;
36693 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
36694 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
36695 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
36696- target_stat_values[++index] = atomic_read(&pau_qps_created);
36697- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
36698+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
36699+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
36700 }
36701
36702 /**
36703diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
36704index 07e4fba..685f041 100644
36705--- a/drivers/infiniband/hw/nes/nes_verbs.c
36706+++ b/drivers/infiniband/hw/nes/nes_verbs.c
36707@@ -46,9 +46,9 @@
36708
36709 #include <rdma/ib_umem.h>
36710
36711-atomic_t mod_qp_timouts;
36712-atomic_t qps_created;
36713-atomic_t sw_qps_destroyed;
36714+atomic_unchecked_t mod_qp_timouts;
36715+atomic_unchecked_t qps_created;
36716+atomic_unchecked_t sw_qps_destroyed;
36717
36718 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
36719
36720@@ -1131,7 +1131,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
36721 if (init_attr->create_flags)
36722 return ERR_PTR(-EINVAL);
36723
36724- atomic_inc(&qps_created);
36725+ atomic_inc_unchecked(&qps_created);
36726 switch (init_attr->qp_type) {
36727 case IB_QPT_RC:
36728 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
36729@@ -1462,7 +1462,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
36730 struct iw_cm_event cm_event;
36731 int ret = 0;
36732
36733- atomic_inc(&sw_qps_destroyed);
36734+ atomic_inc_unchecked(&sw_qps_destroyed);
36735 nesqp->destroyed = 1;
36736
36737 /* Blow away the connection if it exists. */
36738diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
36739index 4d11575..3e890e5 100644
36740--- a/drivers/infiniband/hw/qib/qib.h
36741+++ b/drivers/infiniband/hw/qib/qib.h
36742@@ -51,6 +51,7 @@
36743 #include <linux/completion.h>
36744 #include <linux/kref.h>
36745 #include <linux/sched.h>
36746+#include <linux/slab.h>
36747
36748 #include "qib_common.h"
36749 #include "qib_verbs.h"
36750diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
36751index da739d9..da1c7f4 100644
36752--- a/drivers/input/gameport/gameport.c
36753+++ b/drivers/input/gameport/gameport.c
36754@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
36755 */
36756 static void gameport_init_port(struct gameport *gameport)
36757 {
36758- static atomic_t gameport_no = ATOMIC_INIT(0);
36759+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
36760
36761 __module_get(THIS_MODULE);
36762
36763 mutex_init(&gameport->drv_mutex);
36764 device_initialize(&gameport->dev);
36765 dev_set_name(&gameport->dev, "gameport%lu",
36766- (unsigned long)atomic_inc_return(&gameport_no) - 1);
36767+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
36768 gameport->dev.bus = &gameport_bus;
36769 gameport->dev.release = gameport_release_port;
36770 if (gameport->parent)
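
[Editor's note: the gameport change above is one instance of a pattern repeated in the input drivers that follow (input.c, xpad.c, serio.c): a static atomic counter exists only to hand out monotonically increasing device names, so overflow semantics are irrelevant and the _unchecked variant fits. A minimal sketch of the idiom — the example_* names are hypothetical:

static atomic_unchecked_t example_no = ATOMIC_INIT(0);

static void example_init_port(struct device *dev)
{
	/* ids start at 0; wrap-around after 2^32 registrations is harmless */
	dev_set_name(dev, "example%lu",
		     (unsigned long)atomic_inc_return_unchecked(&example_no) - 1);
}
]
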
36771diff --git a/drivers/input/input.c b/drivers/input/input.c
36772index c044699..174d71a 100644
36773--- a/drivers/input/input.c
36774+++ b/drivers/input/input.c
36775@@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
36776 */
36777 int input_register_device(struct input_dev *dev)
36778 {
36779- static atomic_t input_no = ATOMIC_INIT(0);
36780+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
36781 struct input_devres *devres = NULL;
36782 struct input_handler *handler;
36783 unsigned int packet_size;
36784@@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
36785 dev->setkeycode = input_default_setkeycode;
36786
36787 dev_set_name(&dev->dev, "input%ld",
36788- (unsigned long) atomic_inc_return(&input_no) - 1);
36789+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
36790
36791 error = device_add(&dev->dev);
36792 if (error)
36793diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
36794index 04c69af..5f92d00 100644
36795--- a/drivers/input/joystick/sidewinder.c
36796+++ b/drivers/input/joystick/sidewinder.c
36797@@ -30,6 +30,7 @@
36798 #include <linux/kernel.h>
36799 #include <linux/module.h>
36800 #include <linux/slab.h>
36801+#include <linux/sched.h>
36802 #include <linux/init.h>
36803 #include <linux/input.h>
36804 #include <linux/gameport.h>
36805diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
36806index d6cbfe9..6225402 100644
36807--- a/drivers/input/joystick/xpad.c
36808+++ b/drivers/input/joystick/xpad.c
36809@@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
36810
36811 static int xpad_led_probe(struct usb_xpad *xpad)
36812 {
36813- static atomic_t led_seq = ATOMIC_INIT(0);
36814+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
36815 long led_no;
36816 struct xpad_led *led;
36817 struct led_classdev *led_cdev;
36818@@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
36819 if (!led)
36820 return -ENOMEM;
36821
36822- led_no = (long)atomic_inc_return(&led_seq) - 1;
36823+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
36824
36825 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
36826 led->xpad = xpad;
36827diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
36828index fe1df23..5b710f3 100644
36829--- a/drivers/input/mouse/psmouse.h
36830+++ b/drivers/input/mouse/psmouse.h
36831@@ -115,7 +115,7 @@ struct psmouse_attribute {
36832 ssize_t (*set)(struct psmouse *psmouse, void *data,
36833 const char *buf, size_t count);
36834 bool protect;
36835-};
36836+} __do_const;
36837 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
36838
36839 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
36840diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
36841index 4c842c3..590b0bf 100644
36842--- a/drivers/input/mousedev.c
36843+++ b/drivers/input/mousedev.c
36844@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
36845
36846 spin_unlock_irq(&client->packet_lock);
36847
36848- if (copy_to_user(buffer, data, count))
36849+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
36850 return -EFAULT;
36851
36852 return count;
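
[Editor's note: the mousedev change above adds a bounds check that recurs in several drivers later in this patch (avm/b1.c, icn.c): the length fed to copy_to_user()/copy_from_user() is user-influenced, so it is capped against the kernel buffer before the copy can over-read or over-write it. The guard in isolation — buffer size and error path are illustrative:

u8 data[8];	/* hypothetical fixed-size kernel buffer */

/* reject oversized requests before touching userspace */
if (count > sizeof(data) || copy_to_user(buffer, data, count))
	return -EFAULT;
]
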
36853diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
36854index 25fc597..558bf3b 100644
36855--- a/drivers/input/serio/serio.c
36856+++ b/drivers/input/serio/serio.c
36857@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
36858 */
36859 static void serio_init_port(struct serio *serio)
36860 {
36861- static atomic_t serio_no = ATOMIC_INIT(0);
36862+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
36863
36864 __module_get(THIS_MODULE);
36865
36866@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
36867 mutex_init(&serio->drv_mutex);
36868 device_initialize(&serio->dev);
36869 dev_set_name(&serio->dev, "serio%ld",
36870- (long)atomic_inc_return(&serio_no) - 1);
36871+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
36872 serio->dev.bus = &serio_bus;
36873 serio->dev.release = serio_release_port;
36874 serio->dev.groups = serio_device_attr_groups;
36875diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
36876index ddbdaca..be18a78 100644
36877--- a/drivers/iommu/iommu.c
36878+++ b/drivers/iommu/iommu.c
36879@@ -554,7 +554,7 @@ static struct notifier_block iommu_bus_nb = {
36880 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
36881 {
36882 bus_register_notifier(bus, &iommu_bus_nb);
36883- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
36884+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
36885 }
36886
36887 /**
36888diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
36889index 89562a8..218999b 100644
36890--- a/drivers/isdn/capi/capi.c
36891+++ b/drivers/isdn/capi/capi.c
36892@@ -81,8 +81,8 @@ struct capiminor {
36893
36894 struct capi20_appl *ap;
36895 u32 ncci;
36896- atomic_t datahandle;
36897- atomic_t msgid;
36898+ atomic_unchecked_t datahandle;
36899+ atomic_unchecked_t msgid;
36900
36901 struct tty_port port;
36902 int ttyinstop;
36903@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
36904 capimsg_setu16(s, 2, mp->ap->applid);
36905 capimsg_setu8 (s, 4, CAPI_DATA_B3);
36906 capimsg_setu8 (s, 5, CAPI_RESP);
36907- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
36908+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
36909 capimsg_setu32(s, 8, mp->ncci);
36910 capimsg_setu16(s, 12, datahandle);
36911 }
36912@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
36913 mp->outbytes -= len;
36914 spin_unlock_bh(&mp->outlock);
36915
36916- datahandle = atomic_inc_return(&mp->datahandle);
36917+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
36918 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
36919 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
36920 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
36921 capimsg_setu16(skb->data, 2, mp->ap->applid);
36922 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
36923 capimsg_setu8 (skb->data, 5, CAPI_REQ);
36924- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
36925+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
36926 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
36927 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
36928 capimsg_setu16(skb->data, 16, len); /* Data length */
36929diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
36930index 67abf3f..076b3a6 100644
36931--- a/drivers/isdn/gigaset/interface.c
36932+++ b/drivers/isdn/gigaset/interface.c
36933@@ -160,9 +160,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
36934 }
36935 tty->driver_data = cs;
36936
36937- ++cs->port.count;
36938+ atomic_inc(&cs->port.count);
36939
36940- if (cs->port.count == 1) {
36941+ if (atomic_read(&cs->port.count) == 1) {
36942 tty_port_tty_set(&cs->port, tty);
36943 tty->low_latency = 1;
36944 }
36945@@ -186,9 +186,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
36946
36947 if (!cs->connected)
36948 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
36949- else if (!cs->port.count)
36950+ else if (!atomic_read(&cs->port.count))
36951 dev_warn(cs->dev, "%s: device not opened\n", __func__);
36952- else if (!--cs->port.count)
36953+ else if (!atomic_dec_return(&cs->port.count))
36954 tty_port_tty_set(&cs->port, NULL);
36955
36956 mutex_unlock(&cs->mutex);
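
[Editor's note: the gigaset hunks convert the tty_port open count from a plain int to an atomic_t, and isdn_tty.c below gets the same treatment. Since open()/close()/hangup() can race, the decrement and the zero test must be one atomic step, which atomic_dec_return() provides. The close-side idiom, reduced to a sketch assuming port->count is atomic as in this patch:

/* close(): decrement and zero test happen as a single atomic operation */
if (!atomic_dec_return(&port->count))
	tty_port_tty_set(port, NULL);
]
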
36957diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
36958index 821f7ac..28d4030 100644
36959--- a/drivers/isdn/hardware/avm/b1.c
36960+++ b/drivers/isdn/hardware/avm/b1.c
36961@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
36962 }
36963 if (left) {
36964 if (t4file->user) {
36965- if (copy_from_user(buf, dp, left))
36966+ if (left > sizeof buf || copy_from_user(buf, dp, left))
36967 return -EFAULT;
36968 } else {
36969 memcpy(buf, dp, left);
36970@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
36971 }
36972 if (left) {
36973 if (config->user) {
36974- if (copy_from_user(buf, dp, left))
36975+ if (left > sizeof buf || copy_from_user(buf, dp, left))
36976 return -EFAULT;
36977 } else {
36978 memcpy(buf, dp, left);
36979diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
36980index e09dc8a..15e2efb 100644
36981--- a/drivers/isdn/i4l/isdn_tty.c
36982+++ b/drivers/isdn/i4l/isdn_tty.c
36983@@ -1513,9 +1513,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
36984
36985 #ifdef ISDN_DEBUG_MODEM_OPEN
36986 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
36987- port->count);
36988+ atomic_read(&port->count));
36989 #endif
36990- port->count++;
36991+ atomic_inc(&port->count);
36992 port->tty = tty;
36993 /*
36994 * Start up serial port
36995@@ -1559,7 +1559,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
36996 #endif
36997 return;
36998 }
36999- if ((tty->count == 1) && (port->count != 1)) {
37000+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
37001 /*
37002 * Uh, oh. tty->count is 1, which means that the tty
37003 * structure will be freed. Info->count should always
37004@@ -1568,15 +1568,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
37005 * serial port won't be shutdown.
37006 */
37007 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
37008- "info->count is %d\n", port->count);
37009- port->count = 1;
37010+ "info->count is %d\n", atomic_read(&port->count));
37011+ atomic_set(&port->count, 1);
37012 }
37013- if (--port->count < 0) {
37014+ if (atomic_dec_return(&port->count) < 0) {
37015 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
37016- info->line, port->count);
37017- port->count = 0;
37018+ info->line, atomic_read(&port->count));
37019+ atomic_set(&port->count, 0);
37020 }
37021- if (port->count) {
37022+ if (atomic_read(&port->count)) {
37023 #ifdef ISDN_DEBUG_MODEM_OPEN
37024 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
37025 #endif
37026@@ -1630,7 +1630,7 @@ isdn_tty_hangup(struct tty_struct *tty)
37027 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
37028 return;
37029 isdn_tty_shutdown(info);
37030- port->count = 0;
37031+ atomic_set(&port->count, 0);
37032 port->flags &= ~ASYNC_NORMAL_ACTIVE;
37033 port->tty = NULL;
37034 wake_up_interruptible(&port->open_wait);
37035@@ -1975,7 +1975,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
37036 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
37037 modem_info *info = &dev->mdm.info[i];
37038
37039- if (info->port.count == 0)
37040+ if (atomic_read(&info->port.count) == 0)
37041 continue;
37042 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
37043 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
37044diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
37045index e74df7c..03a03ba 100644
37046--- a/drivers/isdn/icn/icn.c
37047+++ b/drivers/isdn/icn/icn.c
37048@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
37049 if (count > len)
37050 count = len;
37051 if (user) {
37052- if (copy_from_user(msg, buf, count))
37053+ if (count > sizeof msg || copy_from_user(msg, buf, count))
37054 return -EFAULT;
37055 } else
37056 memcpy(msg, buf, count);
37057diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
37058index 6a8405d..0bd1c7e 100644
37059--- a/drivers/leds/leds-clevo-mail.c
37060+++ b/drivers/leds/leds-clevo-mail.c
37061@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
37062 * detected as working, but in reality it is not) as low as
37063 * possible.
37064 */
37065-static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
37066+static const struct dmi_system_id __initconst clevo_mail_led_dmi_table[] = {
37067 {
37068 .callback = clevo_mail_led_dmi_callback,
37069 .ident = "Clevo D410J",
37070diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
37071index ec9b287..65c9bf4 100644
37072--- a/drivers/leds/leds-ss4200.c
37073+++ b/drivers/leds/leds-ss4200.c
37074@@ -92,7 +92,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
37075 * detected as working, but in reality it is not) as low as
37076 * possible.
37077 */
37078-static struct dmi_system_id __initdata nas_led_whitelist[] = {
37079+static const struct dmi_system_id __initconst nas_led_whitelist[] = {
37080 {
37081 .callback = ss4200_led_dmi_callback,
37082 .ident = "Intel SS4200-E",
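
[Editor's note: both LED drivers above mark their DMI whitelists const __initconst instead of __initdata. The tables are only ever read, so they can live in read-only init memory — the general constification theme of this patch. The shape of such a table, with hypothetical example entries:

static const struct dmi_system_id __initconst example_dmi_table[] = {
	{
		.callback = example_dmi_callback,	/* hypothetical */
		.ident    = "Example Board",
		.matches  = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
		},
	},
	{ }	/* zero entry terminates the table */
};
]
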
37083diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
37084index a5ebc00..982886f 100644
37085--- a/drivers/lguest/core.c
37086+++ b/drivers/lguest/core.c
37087@@ -92,9 +92,17 @@ static __init int map_switcher(void)
37088 * it's worked so far. The end address needs +1 because __get_vm_area
37089 * allocates an extra guard page, so we need space for that.
37090 */
37091+
37092+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
37093+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37094+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
37095+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37096+#else
37097 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
37098 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
37099 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
37100+#endif
37101+
37102 if (!switcher_vma) {
37103 err = -ENOMEM;
37104 printk("lguest: could not map switcher pages high\n");
37105@@ -119,7 +127,7 @@ static __init int map_switcher(void)
37106 * Now the Switcher is mapped at the right address, we can't fail!
37107 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
37108 */
37109- memcpy(switcher_vma->addr, start_switcher_text,
37110+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
37111 end_switcher_text - start_switcher_text);
37112
37113 printk(KERN_INFO "lguest: mapped switcher at %p\n",
37114diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
37115index 4af12e1..0e89afe 100644
37116--- a/drivers/lguest/x86/core.c
37117+++ b/drivers/lguest/x86/core.c
37118@@ -59,7 +59,7 @@ static struct {
37119 /* Offset from where switcher.S was compiled to where we've copied it */
37120 static unsigned long switcher_offset(void)
37121 {
37122- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
37123+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
37124 }
37125
37126 /* This cpu's struct lguest_pages. */
37127@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
37128 * These copies are pretty cheap, so we do them unconditionally: */
37129 /* Save the current Host top-level page directory.
37130 */
37131+
37132+#ifdef CONFIG_PAX_PER_CPU_PGD
37133+ pages->state.host_cr3 = read_cr3();
37134+#else
37135 pages->state.host_cr3 = __pa(current->mm->pgd);
37136+#endif
37137+
37138 /*
37139 * Set up the Guest's page tables to see this CPU's pages (and no
37140 * other CPU's pages).
37141@@ -476,7 +482,7 @@ void __init lguest_arch_host_init(void)
37142 * compiled-in switcher code and the high-mapped copy we just made.
37143 */
37144 for (i = 0; i < IDT_ENTRIES; i++)
37145- default_idt_entries[i] += switcher_offset();
37146+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
37147
37148 /*
37149 * Set up the Switcher's per-cpu areas.
37150@@ -559,7 +565,7 @@ void __init lguest_arch_host_init(void)
37151 * it will be undisturbed when we switch. To change %cs and jump we
37152 * need this structure to feed to Intel's "lcall" instruction.
37153 */
37154- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
37155+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
37156 lguest_entry.segment = LGUEST_CS;
37157
37158 /*
37159diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
37160index 40634b0..4f5855e 100644
37161--- a/drivers/lguest/x86/switcher_32.S
37162+++ b/drivers/lguest/x86/switcher_32.S
37163@@ -87,6 +87,7 @@
37164 #include <asm/page.h>
37165 #include <asm/segment.h>
37166 #include <asm/lguest.h>
37167+#include <asm/processor-flags.h>
37168
37169 // We mark the start of the code to copy
37170 // It's placed in .text tho it's never run here
37171@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
37172 // Changes type when we load it: damn Intel!
37173 // For after we switch over our page tables
37174 // That entry will be read-only: we'd crash.
37175+
37176+#ifdef CONFIG_PAX_KERNEXEC
37177+ mov %cr0, %edx
37178+ xor $X86_CR0_WP, %edx
37179+ mov %edx, %cr0
37180+#endif
37181+
37182 movl $(GDT_ENTRY_TSS*8), %edx
37183 ltr %dx
37184
37185@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
37186 // Let's clear it again for our return.
37187 // The GDT descriptor of the Host
37188 // Points to the table after two "size" bytes
37189- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
37190+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
37191 // Clear "used" from type field (byte 5, bit 2)
37192- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
37193+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
37194+
37195+#ifdef CONFIG_PAX_KERNEXEC
37196+ mov %cr0, %eax
37197+ xor $X86_CR0_WP, %eax
37198+ mov %eax, %cr0
37199+#endif
37200
37201 // Once our page table's switched, the Guest is live!
37202 // The Host fades as we run this final step.
37203@@ -295,13 +309,12 @@ deliver_to_host:
37204 // I consulted gcc, and it gave
37205 // These instructions, which I gladly credit:
37206 leal (%edx,%ebx,8), %eax
37207- movzwl (%eax),%edx
37208- movl 4(%eax), %eax
37209- xorw %ax, %ax
37210- orl %eax, %edx
37211+ movl 4(%eax), %edx
37212+ movw (%eax), %dx
37213 // Now the address of the handler's in %edx
37214 // We call it now: its "iret" drops us home.
37215- jmp *%edx
37216+ ljmp $__KERNEL_CS, $1f
37217+1: jmp *%edx
37218
37219 // Every interrupt can come to us here
37220 // But we must truly tell each apart.
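
[Editor's note: the switcher changes above are KERNEXEC plumbing. With PAX_KERNEXEC the GDT lives in read-only memory, so the TSS "busy" bit can only be cleared after temporarily lifting CR0.WP, and addresses compiled into the kernel image are translated through ktla_ktva() before being copied or offset. A C-level sketch of the write-protect toggle the assembly performs — assumption: x86-32, with X86_CR0_WP from <asm/processor-flags.h>, which the patch adds to the includes:

static inline void example_toggle_cr0_wp(void)
{
	unsigned long cr0;

	asm volatile("mov %%cr0, %0" : "=r" (cr0));
	cr0 ^= X86_CR0_WP;			/* flip the write-protect bit */
	asm volatile("mov %0, %%cr0" : : "r" (cr0));
}
]
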
37221diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
37222index 7155945..4bcc562 100644
37223--- a/drivers/md/bitmap.c
37224+++ b/drivers/md/bitmap.c
37225@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
37226 chunk_kb ? "KB" : "B");
37227 if (bitmap->storage.file) {
37228 seq_printf(seq, ", file: ");
37229- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
37230+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
37231 }
37232
37233 seq_printf(seq, "\n");
37234diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
37235index 0666b5d..ed82cb4 100644
37236--- a/drivers/md/dm-ioctl.c
37237+++ b/drivers/md/dm-ioctl.c
37238@@ -1628,7 +1628,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
37239 cmd == DM_LIST_VERSIONS_CMD)
37240 return 0;
37241
37242- if ((cmd == DM_DEV_CREATE_CMD)) {
37243+ if (cmd == DM_DEV_CREATE_CMD) {
37244 if (!*param->name) {
37245 DMWARN("name not supplied when creating device");
37246 return -EINVAL;
37247diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
37248index fa51918..c26253c 100644
37249--- a/drivers/md/dm-raid1.c
37250+++ b/drivers/md/dm-raid1.c
37251@@ -40,7 +40,7 @@ enum dm_raid1_error {
37252
37253 struct mirror {
37254 struct mirror_set *ms;
37255- atomic_t error_count;
37256+ atomic_unchecked_t error_count;
37257 unsigned long error_type;
37258 struct dm_dev *dev;
37259 sector_t offset;
37260@@ -183,7 +183,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
37261 struct mirror *m;
37262
37263 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
37264- if (!atomic_read(&m->error_count))
37265+ if (!atomic_read_unchecked(&m->error_count))
37266 return m;
37267
37268 return NULL;
37269@@ -215,7 +215,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
37270 * simple way to tell if a device has encountered
37271 * errors.
37272 */
37273- atomic_inc(&m->error_count);
37274+ atomic_inc_unchecked(&m->error_count);
37275
37276 if (test_and_set_bit(error_type, &m->error_type))
37277 return;
37278@@ -406,7 +406,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
37279 struct mirror *m = get_default_mirror(ms);
37280
37281 do {
37282- if (likely(!atomic_read(&m->error_count)))
37283+ if (likely(!atomic_read_unchecked(&m->error_count)))
37284 return m;
37285
37286 if (m-- == ms->mirror)
37287@@ -420,7 +420,7 @@ static int default_ok(struct mirror *m)
37288 {
37289 struct mirror *default_mirror = get_default_mirror(m->ms);
37290
37291- return !atomic_read(&default_mirror->error_count);
37292+ return !atomic_read_unchecked(&default_mirror->error_count);
37293 }
37294
37295 static int mirror_available(struct mirror_set *ms, struct bio *bio)
37296@@ -557,7 +557,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
37297 */
37298 if (likely(region_in_sync(ms, region, 1)))
37299 m = choose_mirror(ms, bio->bi_sector);
37300- else if (m && atomic_read(&m->error_count))
37301+ else if (m && atomic_read_unchecked(&m->error_count))
37302 m = NULL;
37303
37304 if (likely(m))
37305@@ -924,7 +924,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
37306 }
37307
37308 ms->mirror[mirror].ms = ms;
37309- atomic_set(&(ms->mirror[mirror].error_count), 0);
37310+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
37311 ms->mirror[mirror].error_type = 0;
37312 ms->mirror[mirror].offset = offset;
37313
37314@@ -1337,7 +1337,7 @@ static void mirror_resume(struct dm_target *ti)
37315 */
37316 static char device_status_char(struct mirror *m)
37317 {
37318- if (!atomic_read(&(m->error_count)))
37319+ if (!atomic_read_unchecked(&(m->error_count)))
37320 return 'A';
37321
37322 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
37323diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
37324index c89cde8..9d184cf 100644
37325--- a/drivers/md/dm-stripe.c
37326+++ b/drivers/md/dm-stripe.c
37327@@ -20,7 +20,7 @@ struct stripe {
37328 struct dm_dev *dev;
37329 sector_t physical_start;
37330
37331- atomic_t error_count;
37332+ atomic_unchecked_t error_count;
37333 };
37334
37335 struct stripe_c {
37336@@ -184,7 +184,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
37337 kfree(sc);
37338 return r;
37339 }
37340- atomic_set(&(sc->stripe[i].error_count), 0);
37341+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
37342 }
37343
37344 ti->private = sc;
37345@@ -325,7 +325,7 @@ static int stripe_status(struct dm_target *ti, status_type_t type,
37346 DMEMIT("%d ", sc->stripes);
37347 for (i = 0; i < sc->stripes; i++) {
37348 DMEMIT("%s ", sc->stripe[i].dev->name);
37349- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
37350+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
37351 'D' : 'A';
37352 }
37353 buffer[i] = '\0';
37354@@ -371,8 +371,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
37355 */
37356 for (i = 0; i < sc->stripes; i++)
37357 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
37358- atomic_inc(&(sc->stripe[i].error_count));
37359- if (atomic_read(&(sc->stripe[i].error_count)) <
37360+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
37361+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
37362 DM_IO_ERROR_THRESHOLD)
37363 schedule_work(&sc->trigger_event);
37364 }
37365diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
37366index daf25d0..d74f49f 100644
37367--- a/drivers/md/dm-table.c
37368+++ b/drivers/md/dm-table.c
37369@@ -390,7 +390,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
37370 if (!dev_size)
37371 return 0;
37372
37373- if ((start >= dev_size) || (start + len > dev_size)) {
37374+ if ((start >= dev_size) || (len > dev_size - start)) {
37375 DMWARN("%s: %s too small for target: "
37376 "start=%llu, len=%llu, dev_size=%llu",
37377 dm_device_name(ti->table->md), bdevname(bdev, b),
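
[Editor's note: the dm-table change above is a classic integer-overflow rewrite. With untrusted start/len, "start + len" can wrap and slip past the check; once "start >= dev_size" has been rejected, "dev_size - start" cannot underflow, so the subtraction form is safe. The transformed check in isolation, with hypothetical error handling:

/* before: (start >= dev_size) || (start + len > dev_size)  -- start+len may wrap
 * after:  subtraction form, safe once start < dev_size is established */
if (start >= dev_size || len > dev_size - start)
	return -ERANGE;		/* hypothetical error path */
]
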
37378diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
37379index 4d6e853..a234157 100644
37380--- a/drivers/md/dm-thin-metadata.c
37381+++ b/drivers/md/dm-thin-metadata.c
37382@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
37383 {
37384 pmd->info.tm = pmd->tm;
37385 pmd->info.levels = 2;
37386- pmd->info.value_type.context = pmd->data_sm;
37387+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
37388 pmd->info.value_type.size = sizeof(__le64);
37389 pmd->info.value_type.inc = data_block_inc;
37390 pmd->info.value_type.dec = data_block_dec;
37391@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
37392
37393 pmd->bl_info.tm = pmd->tm;
37394 pmd->bl_info.levels = 1;
37395- pmd->bl_info.value_type.context = pmd->data_sm;
37396+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
37397 pmd->bl_info.value_type.size = sizeof(__le64);
37398 pmd->bl_info.value_type.inc = data_block_inc;
37399 pmd->bl_info.value_type.dec = data_block_dec;
37400diff --git a/drivers/md/dm.c b/drivers/md/dm.c
37401index 314a0e2..1376406 100644
37402--- a/drivers/md/dm.c
37403+++ b/drivers/md/dm.c
37404@@ -170,9 +170,9 @@ struct mapped_device {
37405 /*
37406 * Event handling.
37407 */
37408- atomic_t event_nr;
37409+ atomic_unchecked_t event_nr;
37410 wait_queue_head_t eventq;
37411- atomic_t uevent_seq;
37412+ atomic_unchecked_t uevent_seq;
37413 struct list_head uevent_list;
37414 spinlock_t uevent_lock; /* Protect access to uevent_list */
37415
37416@@ -1872,8 +1872,8 @@ static struct mapped_device *alloc_dev(int minor)
37417 rwlock_init(&md->map_lock);
37418 atomic_set(&md->holders, 1);
37419 atomic_set(&md->open_count, 0);
37420- atomic_set(&md->event_nr, 0);
37421- atomic_set(&md->uevent_seq, 0);
37422+ atomic_set_unchecked(&md->event_nr, 0);
37423+ atomic_set_unchecked(&md->uevent_seq, 0);
37424 INIT_LIST_HEAD(&md->uevent_list);
37425 spin_lock_init(&md->uevent_lock);
37426
37427@@ -2014,7 +2014,7 @@ static void event_callback(void *context)
37428
37429 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
37430
37431- atomic_inc(&md->event_nr);
37432+ atomic_inc_unchecked(&md->event_nr);
37433 wake_up(&md->eventq);
37434 }
37435
37436@@ -2669,18 +2669,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
37437
37438 uint32_t dm_next_uevent_seq(struct mapped_device *md)
37439 {
37440- return atomic_add_return(1, &md->uevent_seq);
37441+ return atomic_add_return_unchecked(1, &md->uevent_seq);
37442 }
37443
37444 uint32_t dm_get_event_nr(struct mapped_device *md)
37445 {
37446- return atomic_read(&md->event_nr);
37447+ return atomic_read_unchecked(&md->event_nr);
37448 }
37449
37450 int dm_wait_event(struct mapped_device *md, int event_nr)
37451 {
37452 return wait_event_interruptible(md->eventq,
37453- (event_nr != atomic_read(&md->event_nr)));
37454+ (event_nr != atomic_read_unchecked(&md->event_nr)));
37455 }
37456
37457 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
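
[Editor's note: the dm.c counters above (event_nr, uevent_seq) and md's md_event_count below are sequence numbers compared only for (in)equality, never for magnitude, so wrap-around is benign and the _unchecked variants apply. A sketch of the wrap-safe wait idiom, with example_* names as placeholders:

static atomic_unchecked_t example_event_nr = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(example_eventq);

static void example_new_event(void)
{
	atomic_inc_unchecked(&example_event_nr);
	wake_up(&example_eventq);
}

static int example_wait_event(int seen)
{
	/* the equality test stays correct even across counter wrap-around */
	return wait_event_interruptible(example_eventq,
			seen != atomic_read_unchecked(&example_event_nr));
}
]
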
37458diff --git a/drivers/md/md.c b/drivers/md/md.c
37459index 3db3d1b..9487468 100644
37460--- a/drivers/md/md.c
37461+++ b/drivers/md/md.c
37462@@ -240,10 +240,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
37463 * start build, activate spare
37464 */
37465 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
37466-static atomic_t md_event_count;
37467+static atomic_unchecked_t md_event_count;
37468 void md_new_event(struct mddev *mddev)
37469 {
37470- atomic_inc(&md_event_count);
37471+ atomic_inc_unchecked(&md_event_count);
37472 wake_up(&md_event_waiters);
37473 }
37474 EXPORT_SYMBOL_GPL(md_new_event);
37475@@ -253,7 +253,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
37476 */
37477 static void md_new_event_inintr(struct mddev *mddev)
37478 {
37479- atomic_inc(&md_event_count);
37480+ atomic_inc_unchecked(&md_event_count);
37481 wake_up(&md_event_waiters);
37482 }
37483
37484@@ -1503,7 +1503,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
37485 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
37486 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
37487 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
37488- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
37489+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
37490
37491 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
37492 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
37493@@ -1747,7 +1747,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
37494 else
37495 sb->resync_offset = cpu_to_le64(0);
37496
37497- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
37498+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
37499
37500 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
37501 sb->size = cpu_to_le64(mddev->dev_sectors);
37502@@ -2747,7 +2747,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
37503 static ssize_t
37504 errors_show(struct md_rdev *rdev, char *page)
37505 {
37506- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
37507+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
37508 }
37509
37510 static ssize_t
37511@@ -2756,7 +2756,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
37512 char *e;
37513 unsigned long n = simple_strtoul(buf, &e, 10);
37514 if (*buf && (*e == 0 || *e == '\n')) {
37515- atomic_set(&rdev->corrected_errors, n);
37516+ atomic_set_unchecked(&rdev->corrected_errors, n);
37517 return len;
37518 }
37519 return -EINVAL;
37520@@ -3203,8 +3203,8 @@ int md_rdev_init(struct md_rdev *rdev)
37521 rdev->sb_loaded = 0;
37522 rdev->bb_page = NULL;
37523 atomic_set(&rdev->nr_pending, 0);
37524- atomic_set(&rdev->read_errors, 0);
37525- atomic_set(&rdev->corrected_errors, 0);
37526+ atomic_set_unchecked(&rdev->read_errors, 0);
37527+ atomic_set_unchecked(&rdev->corrected_errors, 0);
37528
37529 INIT_LIST_HEAD(&rdev->same_set);
37530 init_waitqueue_head(&rdev->blocked_wait);
37531@@ -6980,7 +6980,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
37532
37533 spin_unlock(&pers_lock);
37534 seq_printf(seq, "\n");
37535- seq->poll_event = atomic_read(&md_event_count);
37536+ seq->poll_event = atomic_read_unchecked(&md_event_count);
37537 return 0;
37538 }
37539 if (v == (void*)2) {
37540@@ -7083,7 +7083,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
37541 return error;
37542
37543 seq = file->private_data;
37544- seq->poll_event = atomic_read(&md_event_count);
37545+ seq->poll_event = atomic_read_unchecked(&md_event_count);
37546 return error;
37547 }
37548
37549@@ -7097,7 +7097,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
37550 /* always allow read */
37551 mask = POLLIN | POLLRDNORM;
37552
37553- if (seq->poll_event != atomic_read(&md_event_count))
37554+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
37555 mask |= POLLERR | POLLPRI;
37556 return mask;
37557 }
37558@@ -7141,7 +7141,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
37559 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
37560 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
37561 (int)part_stat_read(&disk->part0, sectors[1]) -
37562- atomic_read(&disk->sync_io);
37563+ atomic_read_unchecked(&disk->sync_io);
37564 /* sync IO will cause sync_io to increase before the disk_stats
37565 * as sync_io is counted when a request starts, and
37566 * disk_stats is counted when it completes.
37567diff --git a/drivers/md/md.h b/drivers/md/md.h
37568index eca59c3..7c42285 100644
37569--- a/drivers/md/md.h
37570+++ b/drivers/md/md.h
37571@@ -94,13 +94,13 @@ struct md_rdev {
37572 * only maintained for arrays that
37573 * support hot removal
37574 */
37575- atomic_t read_errors; /* number of consecutive read errors that
37576+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
37577 * we have tried to ignore.
37578 */
37579 struct timespec last_read_error; /* monotonic time since our
37580 * last read error
37581 */
37582- atomic_t corrected_errors; /* number of corrected read errors,
37583+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
37584 * for reporting to userspace and storing
37585 * in superblock.
37586 */
37587@@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
37588
37589 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
37590 {
37591- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
37592+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
37593 }
37594
37595 struct md_personality
37596diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
37597index 1cbfc6b..56e1dbb 100644
37598--- a/drivers/md/persistent-data/dm-space-map.h
37599+++ b/drivers/md/persistent-data/dm-space-map.h
37600@@ -60,6 +60,7 @@ struct dm_space_map {
37601 int (*root_size)(struct dm_space_map *sm, size_t *result);
37602 int (*copy_root)(struct dm_space_map *sm, void *copy_to_here_le, size_t len);
37603 };
37604+typedef struct dm_space_map __no_const dm_space_map_no_const;
37605
37606 /*----------------------------------------------------------------*/
37607
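
[Editor's note: dm-space-map.h introduces the companion idiom to the constification work. The constify plugin treats structures consisting purely of function pointers (ops tables) as read-only, so the rare site that must store a pointer to one in a writable, non-const slot — dm-thin-metadata.c above, assigning it to a value_type context — casts through a __no_const typedef to opt that alias back out. Schematically:

struct example_ops {
	int (*frob)(void *context);
};
/* __no_const exempts this alias from the constify plugin */
typedef struct example_ops __no_const example_ops_no_const;
]
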
37608diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
37609index d5bddfc..b079b4b 100644
37610--- a/drivers/md/raid1.c
37611+++ b/drivers/md/raid1.c
37612@@ -1818,7 +1818,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
37613 if (r1_sync_page_io(rdev, sect, s,
37614 bio->bi_io_vec[idx].bv_page,
37615 READ) != 0)
37616- atomic_add(s, &rdev->corrected_errors);
37617+ atomic_add_unchecked(s, &rdev->corrected_errors);
37618 }
37619 sectors -= s;
37620 sect += s;
37621@@ -2040,7 +2040,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
37622 test_bit(In_sync, &rdev->flags)) {
37623 if (r1_sync_page_io(rdev, sect, s,
37624 conf->tmppage, READ)) {
37625- atomic_add(s, &rdev->corrected_errors);
37626+ atomic_add_unchecked(s, &rdev->corrected_errors);
37627 printk(KERN_INFO
37628 "md/raid1:%s: read error corrected "
37629 "(%d sectors at %llu on %s)\n",
37630diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
37631index 64d4824..8b9ea57 100644
37632--- a/drivers/md/raid10.c
37633+++ b/drivers/md/raid10.c
37634@@ -1877,7 +1877,7 @@ static void end_sync_read(struct bio *bio, int error)
37635 /* The write handler will notice the lack of
37636 * R10BIO_Uptodate and record any errors etc
37637 */
37638- atomic_add(r10_bio->sectors,
37639+ atomic_add_unchecked(r10_bio->sectors,
37640 &conf->mirrors[d].rdev->corrected_errors);
37641
37642 /* for reconstruct, we always reschedule after a read.
37643@@ -2226,7 +2226,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
37644 {
37645 struct timespec cur_time_mon;
37646 unsigned long hours_since_last;
37647- unsigned int read_errors = atomic_read(&rdev->read_errors);
37648+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
37649
37650 ktime_get_ts(&cur_time_mon);
37651
37652@@ -2248,9 +2248,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
37653 * overflowing the shift of read_errors by hours_since_last.
37654 */
37655 if (hours_since_last >= 8 * sizeof(read_errors))
37656- atomic_set(&rdev->read_errors, 0);
37657+ atomic_set_unchecked(&rdev->read_errors, 0);
37658 else
37659- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
37660+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
37661 }
37662
37663 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
37664@@ -2304,8 +2304,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
37665 return;
37666
37667 check_decay_read_errors(mddev, rdev);
37668- atomic_inc(&rdev->read_errors);
37669- if (atomic_read(&rdev->read_errors) > max_read_errors) {
37670+ atomic_inc_unchecked(&rdev->read_errors);
37671+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
37672 char b[BDEVNAME_SIZE];
37673 bdevname(rdev->bdev, b);
37674
37675@@ -2313,7 +2313,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
37676 "md/raid10:%s: %s: Raid device exceeded "
37677 "read_error threshold [cur %d:max %d]\n",
37678 mdname(mddev), b,
37679- atomic_read(&rdev->read_errors), max_read_errors);
37680+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
37681 printk(KERN_NOTICE
37682 "md/raid10:%s: %s: Failing raid device\n",
37683 mdname(mddev), b);
37684@@ -2468,7 +2468,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
37685 sect +
37686 choose_data_offset(r10_bio, rdev)),
37687 bdevname(rdev->bdev, b));
37688- atomic_add(s, &rdev->corrected_errors);
37689+ atomic_add_unchecked(s, &rdev->corrected_errors);
37690 }
37691
37692 rdev_dec_pending(rdev, mddev);
37693diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
37694index 19d77a0..56051b92 100644
37695--- a/drivers/md/raid5.c
37696+++ b/drivers/md/raid5.c
37697@@ -1797,21 +1797,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
37698 mdname(conf->mddev), STRIPE_SECTORS,
37699 (unsigned long long)s,
37700 bdevname(rdev->bdev, b));
37701- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
37702+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
37703 clear_bit(R5_ReadError, &sh->dev[i].flags);
37704 clear_bit(R5_ReWrite, &sh->dev[i].flags);
37705 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
37706 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
37707
37708- if (atomic_read(&rdev->read_errors))
37709- atomic_set(&rdev->read_errors, 0);
37710+ if (atomic_read_unchecked(&rdev->read_errors))
37711+ atomic_set_unchecked(&rdev->read_errors, 0);
37712 } else {
37713 const char *bdn = bdevname(rdev->bdev, b);
37714 int retry = 0;
37715 int set_bad = 0;
37716
37717 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
37718- atomic_inc(&rdev->read_errors);
37719+ atomic_inc_unchecked(&rdev->read_errors);
37720 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
37721 printk_ratelimited(
37722 KERN_WARNING
37723@@ -1839,7 +1839,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
37724 mdname(conf->mddev),
37725 (unsigned long long)s,
37726 bdn);
37727- } else if (atomic_read(&rdev->read_errors)
37728+ } else if (atomic_read_unchecked(&rdev->read_errors)
37729 > conf->max_nr_stripes)
37730 printk(KERN_WARNING
37731 "md/raid:%s: Too many read errors, failing device %s.\n",
37732diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
37733index d33101a..6b13069 100644
37734--- a/drivers/media/dvb-core/dvbdev.c
37735+++ b/drivers/media/dvb-core/dvbdev.c
37736@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
37737 const struct dvb_device *template, void *priv, int type)
37738 {
37739 struct dvb_device *dvbdev;
37740- struct file_operations *dvbdevfops;
37741+ file_operations_no_const *dvbdevfops;
37742 struct device *clsdev;
37743 int minor;
37744 int id;
37745diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
37746index 404f63a..4796533 100644
37747--- a/drivers/media/dvb-frontends/dib3000.h
37748+++ b/drivers/media/dvb-frontends/dib3000.h
37749@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
37750 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
37751 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
37752 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
37753-};
37754+} __no_const;
37755
37756 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
37757 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
37758diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
37759index 8e9a668..78d6310 100644
37760--- a/drivers/media/platform/omap/omap_vout.c
37761+++ b/drivers/media/platform/omap/omap_vout.c
37762@@ -63,7 +63,6 @@ enum omap_vout_channels {
37763 OMAP_VIDEO2,
37764 };
37765
37766-static struct videobuf_queue_ops video_vbq_ops;
37767 /* Variables configurable through module params*/
37768 static u32 video1_numbuffers = 3;
37769 static u32 video2_numbuffers = 3;
37770@@ -1012,6 +1011,12 @@ static int omap_vout_open(struct file *file)
37771 {
37772 struct videobuf_queue *q;
37773 struct omap_vout_device *vout = NULL;
37774+ static struct videobuf_queue_ops video_vbq_ops = {
37775+ .buf_setup = omap_vout_buffer_setup,
37776+ .buf_prepare = omap_vout_buffer_prepare,
37777+ .buf_release = omap_vout_buffer_release,
37778+ .buf_queue = omap_vout_buffer_queue,
37779+ };
37780
37781 vout = video_drvdata(file);
37782 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
37783@@ -1029,10 +1034,6 @@ static int omap_vout_open(struct file *file)
37784 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
37785
37786 q = &vout->vbq;
37787- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
37788- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
37789- video_vbq_ops.buf_release = omap_vout_buffer_release;
37790- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
37791 spin_lock_init(&vout->vbq_lock);
37792
37793 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
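
[Editor's note: the omap_vout change removes the obstacle to constifying videobuf_queue_ops: instead of filling in a writable file-scope ops struct at open() time, the function pointers become a static initializer inside omap_vout_open(), so the object never needs to be written at runtime. The same shape reduced to a sketch — the example_* callbacks and helper are hypothetical:

static int example_open(struct file *file)
{
	static struct videobuf_queue_ops vbq_ops = {
		.buf_setup   = example_buffer_setup,
		.buf_prepare = example_buffer_prepare,
		.buf_release = example_buffer_release,
		.buf_queue   = example_buffer_queue,
	};

	/* vbq_ops is fully initialized at compile time; nothing mutates it */
	example_queue_init(&vbq_ops);	/* hypothetical; stands in for
					 * videobuf_queue_dma_contig_init() */
	return 0;
}
]
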
37794diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
37795index b671e20..34088b7 100644
37796--- a/drivers/media/platform/s5p-tv/mixer.h
37797+++ b/drivers/media/platform/s5p-tv/mixer.h
37798@@ -155,7 +155,7 @@ struct mxr_layer {
37799 /** layer index (unique identifier) */
37800 int idx;
37801 /** callbacks for layer methods */
37802- struct mxr_layer_ops ops;
37803+ struct mxr_layer_ops *ops;
37804 /** format array */
37805 const struct mxr_format **fmt_array;
37806 /** size of format array */
37807diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
37808index b93a21f..2535195 100644
37809--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
37810+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
37811@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
37812 {
37813 struct mxr_layer *layer;
37814 int ret;
37815- struct mxr_layer_ops ops = {
37816+ static struct mxr_layer_ops ops = {
37817 .release = mxr_graph_layer_release,
37818 .buffer_set = mxr_graph_buffer_set,
37819 .stream_set = mxr_graph_stream_set,
37820diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
37821index 3b1670a..595c939 100644
37822--- a/drivers/media/platform/s5p-tv/mixer_reg.c
37823+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
37824@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
37825 layer->update_buf = next;
37826 }
37827
37828- layer->ops.buffer_set(layer, layer->update_buf);
37829+ layer->ops->buffer_set(layer, layer->update_buf);
37830
37831 if (done && done != layer->shadow_buf)
37832 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
37833diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
37834index 1f3b743..e839271 100644
37835--- a/drivers/media/platform/s5p-tv/mixer_video.c
37836+++ b/drivers/media/platform/s5p-tv/mixer_video.c
37837@@ -208,7 +208,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
37838 layer->geo.src.height = layer->geo.src.full_height;
37839
37840 mxr_geometry_dump(mdev, &layer->geo);
37841- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
37842+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
37843 mxr_geometry_dump(mdev, &layer->geo);
37844 }
37845
37846@@ -226,7 +226,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
37847 layer->geo.dst.full_width = mbus_fmt.width;
37848 layer->geo.dst.full_height = mbus_fmt.height;
37849 layer->geo.dst.field = mbus_fmt.field;
37850- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
37851+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
37852
37853 mxr_geometry_dump(mdev, &layer->geo);
37854 }
37855@@ -332,7 +332,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
37856 /* set source size to highest accepted value */
37857 geo->src.full_width = max(geo->dst.full_width, pix->width);
37858 geo->src.full_height = max(geo->dst.full_height, pix->height);
37859- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
37860+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
37861 mxr_geometry_dump(mdev, &layer->geo);
37862 /* set cropping to total visible screen */
37863 geo->src.width = pix->width;
37864@@ -340,12 +340,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
37865 geo->src.x_offset = 0;
37866 geo->src.y_offset = 0;
37867 /* assure consistency of geometry */
37868- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
37869+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
37870 mxr_geometry_dump(mdev, &layer->geo);
37871 /* set full size to lowest possible value */
37872 geo->src.full_width = 0;
37873 geo->src.full_height = 0;
37874- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
37875+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
37876 mxr_geometry_dump(mdev, &layer->geo);
37877
37878 /* returning results */
37879@@ -472,7 +472,7 @@ static int mxr_s_selection(struct file *file, void *fh,
37880 target->width = s->r.width;
37881 target->height = s->r.height;
37882
37883- layer->ops.fix_geometry(layer, stage, s->flags);
37884+ layer->ops->fix_geometry(layer, stage, s->flags);
37885
37886 /* retrieve update selection rectangle */
37887 res.left = target->x_offset;
37888@@ -937,13 +937,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
37889 mxr_output_get(mdev);
37890
37891 mxr_layer_update_output(layer);
37892- layer->ops.format_set(layer);
37893+ layer->ops->format_set(layer);
37894 /* enabling layer in hardware */
37895 spin_lock_irqsave(&layer->enq_slock, flags);
37896 layer->state = MXR_LAYER_STREAMING;
37897 spin_unlock_irqrestore(&layer->enq_slock, flags);
37898
37899- layer->ops.stream_set(layer, MXR_ENABLE);
37900+ layer->ops->stream_set(layer, MXR_ENABLE);
37901 mxr_streamer_get(mdev);
37902
37903 return 0;
37904@@ -1013,7 +1013,7 @@ static int stop_streaming(struct vb2_queue *vq)
37905 spin_unlock_irqrestore(&layer->enq_slock, flags);
37906
37907 /* disabling layer in hardware */
37908- layer->ops.stream_set(layer, MXR_DISABLE);
37909+ layer->ops->stream_set(layer, MXR_DISABLE);
37910 /* remove one streamer */
37911 mxr_streamer_put(mdev);
37912 /* allow changes in output configuration */
37913@@ -1052,8 +1052,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
37914
37915 void mxr_layer_release(struct mxr_layer *layer)
37916 {
37917- if (layer->ops.release)
37918- layer->ops.release(layer);
37919+ if (layer->ops->release)
37920+ layer->ops->release(layer);
37921 }
37922
37923 void mxr_base_layer_release(struct mxr_layer *layer)
37924@@ -1079,7 +1079,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
37925
37926 layer->mdev = mdev;
37927 layer->idx = idx;
37928- layer->ops = *ops;
37929+ layer->ops = ops;
37930
37931 spin_lock_init(&layer->enq_slock);
37932 INIT_LIST_HEAD(&layer->enq_list);
37933diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
37934index 3d13a63..da31bf1 100644
37935--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
37936+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
37937@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
37938 {
37939 struct mxr_layer *layer;
37940 int ret;
37941- struct mxr_layer_ops ops = {
37942+ static struct mxr_layer_ops ops = {
37943 .release = mxr_vp_layer_release,
37944 .buffer_set = mxr_vp_buffer_set,
37945 .stream_set = mxr_vp_stream_set,
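The s5p-tv changes above convert the per-layer embedded ops struct into a pointer, so every layer shares one static table of function pointers instead of carrying a writable copy, and call sites switch from '.' to '->'. A hedged sketch of the resulting shape, with invented names:

#include <stdio.h>

struct layer_ops {
    void (*stream_set)(int enable);
};

struct layer {
    int idx;
    const struct layer_ops *ops;   /* pointer, not an embedded copy */
};

static void graph_stream_set(int enable)
{
    printf("stream %s\n", enable ? "on" : "off");
}

/* One shared, read-only table instead of a writable copy per layer. */
static const struct layer_ops graph_ops = {
    .stream_set = graph_stream_set,
};

int main(void)
{
    struct layer l = { .idx = 0, .ops = &graph_ops };

    l.ops->stream_set(1);          /* call sites move from '.' to '->' */
    l.ops->stream_set(0);
    return 0;
}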
37946diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
37947index 643d80a..56bb96b 100644
37948--- a/drivers/media/radio/radio-cadet.c
37949+++ b/drivers/media/radio/radio-cadet.c
37950@@ -302,6 +302,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
37951 unsigned char readbuf[RDS_BUFFER];
37952 int i = 0;
37953
37954+ if (count > RDS_BUFFER)
37955+ return -EFAULT;
37956 mutex_lock(&dev->lock);
37957 if (dev->rdsstat == 0)
37958 cadet_start_rds(dev);
37959@@ -317,7 +319,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
37960 while (i < count && dev->rdsin != dev->rdsout)
37961 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
37962
37963- if (i && copy_to_user(data, readbuf, i))
37964+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
37965 i = -EFAULT;
37966 unlock:
37967 mutex_unlock(&dev->lock);
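The radio-cadet hunk bounds the user-supplied count against the fixed-size stack buffer before copying out. A user-space sketch of the same guard, with copy_to_user() replaced by a memcpy stand-in; the -EINVAL choice below is this sketch's, not the driver's (the patch itself returns -EFAULT for the oversized case):

#include <errno.h>
#include <string.h>
#include <sys/types.h>

#define RDS_BUFFER 256

/* Stand-in for copy_to_user(): returns nonzero on failure. */
static int copy_out(void *dst, const void *src, size_t n)
{
    memcpy(dst, src, n);
    return 0;
}

static ssize_t bounded_read(void *dst, size_t count)
{
    unsigned char readbuf[RDS_BUFFER];
    size_t i;

    /* Reject oversized requests before the fixed-size buffer is used;
     * without this check, 'count' bounds nothing. */
    if (count > sizeof(readbuf))
        return -EINVAL;

    for (i = 0; i < count; i++)
        readbuf[i] = (unsigned char)i;   /* fill stands in for RDS data */

    if (copy_out(dst, readbuf, i))
        return -EFAULT;
    return (ssize_t)i;
}

int main(void)
{
    unsigned char out[RDS_BUFFER];
    return bounded_read(out, sizeof(out)) < 0;
}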
37968diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
37969index 3940bb0..fb3952a 100644
37970--- a/drivers/media/usb/dvb-usb/cxusb.c
37971+++ b/drivers/media/usb/dvb-usb/cxusb.c
37972@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
37973
37974 struct dib0700_adapter_state {
37975 int (*set_param_save) (struct dvb_frontend *);
37976-};
37977+} __no_const;
37978
37979 static int dib7070_set_param_override(struct dvb_frontend *fe)
37980 {
37981diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
37982index 9382895..ac8093c 100644
37983--- a/drivers/media/usb/dvb-usb/dw2102.c
37984+++ b/drivers/media/usb/dvb-usb/dw2102.c
37985@@ -95,7 +95,7 @@ struct su3000_state {
37986
37987 struct s6x0_state {
37988 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
37989-};
37990+} __no_const;
37991
37992 /* debug */
37993 static int dvb_usb_dw2102_debug;
37994diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
37995index aa6e7c7..4cd8061 100644
37996--- a/drivers/media/v4l2-core/v4l2-ioctl.c
37997+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
37998@@ -1923,7 +1923,8 @@ struct v4l2_ioctl_info {
37999 struct file *file, void *fh, void *p);
38000 } u;
38001 void (*debug)(const void *arg, bool write_only);
38002-};
38003+} __do_const;
38004+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
38005
38006 /* This control needs a priority check */
38007 #define INFO_FL_PRIO (1 << 0)
38008@@ -2108,7 +2109,7 @@ static long __video_do_ioctl(struct file *file,
38009 struct video_device *vfd = video_devdata(file);
38010 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
38011 bool write_only = false;
38012- struct v4l2_ioctl_info default_info;
38013+ v4l2_ioctl_info_no_const default_info;
38014 const struct v4l2_ioctl_info *info;
38015 void *fh = file->private_data;
38016 struct v4l2_fh *vfh = NULL;
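__do_const and __no_const above are grsecurity/PaX annotations consumed by the constify GCC plugin: the first forces a structure of function pointers into read-only memory, the second opts a single declaration back out - here via a typedef for the one stack-local template that __video_do_ioctl() still mutates. The plugin itself can't be reproduced here, but plain const models the intent (names invented):

/* Plain-'const' model of the __do_const/__no_const split; the real
 * attributes drive the constify GCC plugin instead. */
struct ioctl_info {
    unsigned int ioctl;
    long (*func)(void *fh, void *arg);
};

static long get_fmt(void *fh, void *arg) { (void)fh; (void)arg; return 0; }

/* Shared dispatch table: read-only. */
static const struct ioctl_info ioctl_table[] = {
    { 0x5601u, get_fmt },
};

/* Writable alias type for the one stack-local "template" copy. */
typedef struct ioctl_info ioctl_info_no_const;

int main(void)
{
    ioctl_info_no_const default_info = ioctl_table[0];  /* mutable copy */
    default_info.ioctl = 0x5602u;                       /* patched locally */
    return (int)default_info.func(0, 0);
}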
38017diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c
38018index 29b2172..a7c5b31 100644
38019--- a/drivers/memstick/host/r592.c
38020+++ b/drivers/memstick/host/r592.c
38021@@ -454,7 +454,7 @@ static int r592_transfer_fifo_pio(struct r592_device *dev)
38022 /* Executes one TPC (data is read/written from small or large fifo) */
38023 static void r592_execute_tpc(struct r592_device *dev)
38024 {
38025- bool is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
38026+ bool is_write;
38027 int len, error;
38028 u32 status, reg;
38029
38030@@ -463,6 +463,7 @@ static void r592_execute_tpc(struct r592_device *dev)
38031 return;
38032 }
38033
38034+ is_write = dev->req->tpc >= MS_TPC_SET_RW_REG_ADRS;
38035 len = dev->req->long_data ?
38036 dev->req->sg.length : dev->req->data_len;
38037
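The r592 hunk defers computing is_write, which dereferences dev->req, until after the function's early-return guard (the guard's exact condition sits outside the visible context, but the shape is a validity check on the request). A sketch of the initialize-after-check ordering, with invented types:

#include <stddef.h>

struct request      { int tpc; };
struct device_state { struct request *req; };

#define TPC_WRITE_THRESHOLD 4    /* stands in for MS_TPC_SET_RW_REG_ADRS */

static void execute_tpc(struct device_state *dev)
{
    int is_write;                /* declared, not yet derived from dev->req */

    if (dev->req == NULL)
        return;                  /* guard runs before any dereference */

    /* Safe now; initializing at the declaration would have
     * dereferenced dev->req before the guard could run. */
    is_write = dev->req->tpc >= TPC_WRITE_THRESHOLD;
    (void)is_write;
}

int main(void)
{
    struct device_state dev = { .req = NULL };
    execute_tpc(&dev);           /* returns harmlessly instead of crashing */
    return 0;
}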
38038diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
38039index fb69baa..cf7ad22 100644
38040--- a/drivers/message/fusion/mptbase.c
38041+++ b/drivers/message/fusion/mptbase.c
38042@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
38043 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
38044 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
38045
38046+#ifdef CONFIG_GRKERNSEC_HIDESYM
38047+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
38048+#else
38049 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
38050 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
38051+#endif
38052+
38053 /*
38054 * Rounding UP to nearest 4-kB boundary here...
38055 */
38056diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
38057index fa43c39..daeb158 100644
38058--- a/drivers/message/fusion/mptsas.c
38059+++ b/drivers/message/fusion/mptsas.c
38060@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
38061 return 0;
38062 }
38063
38064+static inline void
38065+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38066+{
38067+ if (phy_info->port_details) {
38068+ phy_info->port_details->rphy = rphy;
38069+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38070+ ioc->name, rphy));
38071+ }
38072+
38073+ if (rphy) {
38074+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38075+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38076+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38077+ ioc->name, rphy, rphy->dev.release));
38078+ }
38079+}
38080+
38081 /* no mutex */
38082 static void
38083 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
38084@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
38085 return NULL;
38086 }
38087
38088-static inline void
38089-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
38090-{
38091- if (phy_info->port_details) {
38092- phy_info->port_details->rphy = rphy;
38093- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
38094- ioc->name, rphy));
38095- }
38096-
38097- if (rphy) {
38098- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
38099- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
38100- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
38101- ioc->name, rphy, rphy->dev.release));
38102- }
38103-}
38104-
38105 static inline struct sas_port *
38106 mptsas_get_port(struct mptsas_phyinfo *phy_info)
38107 {
38108diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
38109index 164afa7..b6b2e74 100644
38110--- a/drivers/message/fusion/mptscsih.c
38111+++ b/drivers/message/fusion/mptscsih.c
38112@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
38113
38114 h = shost_priv(SChost);
38115
38116- if (h) {
38117- if (h->info_kbuf == NULL)
38118- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38119- return h->info_kbuf;
38120- h->info_kbuf[0] = '\0';
38121+ if (!h)
38122+ return NULL;
38123
38124- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38125- h->info_kbuf[size-1] = '\0';
38126- }
38127+ if (h->info_kbuf == NULL)
38128+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
38129+ return h->info_kbuf;
38130+ h->info_kbuf[0] = '\0';
38131+
38132+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
38133+ h->info_kbuf[size-1] = '\0';
38134
38135 return h->info_kbuf;
38136 }
38137diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
38138index 8001aa6..b137580 100644
38139--- a/drivers/message/i2o/i2o_proc.c
38140+++ b/drivers/message/i2o/i2o_proc.c
38141@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
38142 "Array Controller Device"
38143 };
38144
38145-static char *chtostr(char *tmp, u8 *chars, int n)
38146-{
38147- tmp[0] = 0;
38148- return strncat(tmp, (char *)chars, n);
38149-}
38150-
38151 static int i2o_report_query_status(struct seq_file *seq, int block_status,
38152 char *group)
38153 {
38154@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38155 } *result;
38156
38157 i2o_exec_execute_ddm_table ddm_table;
38158- char tmp[28 + 1];
38159
38160 result = kmalloc(sizeof(*result), GFP_KERNEL);
38161 if (!result)
38162@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
38163
38164 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
38165 seq_printf(seq, "%-#8x", ddm_table.module_id);
38166- seq_printf(seq, "%-29s",
38167- chtostr(tmp, ddm_table.module_name_version, 28));
38168+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
38169 seq_printf(seq, "%9d ", ddm_table.data_size);
38170 seq_printf(seq, "%8d", ddm_table.code_size);
38171
38172@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
38173
38174 i2o_driver_result_table *result;
38175 i2o_driver_store_table *dst;
38176- char tmp[28 + 1];
38177
38178 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
38179 if (result == NULL)
38180@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
38181
38182 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
38183 seq_printf(seq, "%-#8x", dst->module_id);
38184- seq_printf(seq, "%-29s",
38185- chtostr(tmp, dst->module_name_version, 28));
38186- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
38187+ seq_printf(seq, "%-.28s", dst->module_name_version);
38188+ seq_printf(seq, "%-.8s", dst->date);
38189 seq_printf(seq, "%8d ", dst->module_size);
38190 seq_printf(seq, "%8d ", dst->mpb_size);
38191 seq_printf(seq, "0x%04x", dst->module_flags);
38192@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
38193 // == (allow) 512d bytes (max)
38194 static u16 *work16 = (u16 *) work32;
38195 int token;
38196- char tmp[16 + 1];
38197
38198 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
38199
38200@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
38201 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
38202 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
38203 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
38204- seq_printf(seq, "Vendor info : %s\n",
38205- chtostr(tmp, (u8 *) (work32 + 2), 16));
38206- seq_printf(seq, "Product info : %s\n",
38207- chtostr(tmp, (u8 *) (work32 + 6), 16));
38208- seq_printf(seq, "Description : %s\n",
38209- chtostr(tmp, (u8 *) (work32 + 10), 16));
38210- seq_printf(seq, "Product rev. : %s\n",
38211- chtostr(tmp, (u8 *) (work32 + 14), 8));
38212+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
38213+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
38214+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
38215+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
38216
38217 seq_printf(seq, "Serial number : ");
38218 print_serial_number(seq, (u8 *) (work32 + 16),
38219@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
38220 u8 pad[256]; // allow up to 256 byte (max) serial number
38221 } result;
38222
38223- char tmp[24 + 1];
38224-
38225 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
38226
38227 if (token < 0) {
38228@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
38229 }
38230
38231 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
38232- seq_printf(seq, "Module name : %s\n",
38233- chtostr(tmp, result.module_name, 24));
38234- seq_printf(seq, "Module revision : %s\n",
38235- chtostr(tmp, result.module_rev, 8));
38236+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
38237+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
38238
38239 seq_printf(seq, "Serial number : ");
38240 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
38241@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
38242 u8 instance_number[4];
38243 } result;
38244
38245- char tmp[64 + 1];
38246-
38247 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
38248
38249 if (token < 0) {
38250@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
38251 return 0;
38252 }
38253
38254- seq_printf(seq, "Device name : %s\n",
38255- chtostr(tmp, result.device_name, 64));
38256- seq_printf(seq, "Service name : %s\n",
38257- chtostr(tmp, result.service_name, 64));
38258- seq_printf(seq, "Physical name : %s\n",
38259- chtostr(tmp, result.physical_location, 64));
38260- seq_printf(seq, "Instance number : %s\n",
38261- chtostr(tmp, result.instance_number, 4));
38262+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
38263+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
38264+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
38265+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
38266
38267 return 0;
38268 }
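The i2o_proc hunks drop the chtostr() bounce buffer entirely: printf-style precision on %s ("%.28s") already caps how many bytes are read, which is exactly what fixed-width, possibly unterminated firmware fields need. A self-contained demonstration:

#include <stdio.h>

int main(void)
{
    /* Fixed-width device fields are often NOT NUL-terminated. */
    char module_name[8] = { 'm', 'o', 'd', 'u', 'l', 'e', '0', '1' };

    /* Precision caps the bytes read even without a terminator, so no
     * temporary buffer or strncat() copy is required. */
    printf("Module name : %.8s\n", module_name);

    /* Width plus precision pads the same bounded read to a column. */
    printf("Module name : [%-12.8s]\n", module_name);
    return 0;
}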
38269diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
38270index a8c08f3..155fe3d 100644
38271--- a/drivers/message/i2o/iop.c
38272+++ b/drivers/message/i2o/iop.c
38273@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
38274
38275 spin_lock_irqsave(&c->context_list_lock, flags);
38276
38277- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
38278- atomic_inc(&c->context_list_counter);
38279+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
38280+ atomic_inc_unchecked(&c->context_list_counter);
38281
38282- entry->context = atomic_read(&c->context_list_counter);
38283+ entry->context = atomic_read_unchecked(&c->context_list_counter);
38284
38285 list_add(&entry->list, &c->context_list);
38286
38287@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
38288
38289 #if BITS_PER_LONG == 64
38290 spin_lock_init(&c->context_list_lock);
38291- atomic_set(&c->context_list_counter, 0);
38292+ atomic_set_unchecked(&c->context_list_counter, 0);
38293 INIT_LIST_HEAD(&c->context_list);
38294 #endif
38295
38296diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
38297index 45ece11..8efa218 100644
38298--- a/drivers/mfd/janz-cmodio.c
38299+++ b/drivers/mfd/janz-cmodio.c
38300@@ -13,6 +13,7 @@
38301
38302 #include <linux/kernel.h>
38303 #include <linux/module.h>
38304+#include <linux/slab.h>
38305 #include <linux/init.h>
38306 #include <linux/pci.h>
38307 #include <linux/interrupt.h>
38308diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
38309index a5f9888..1c0ed56 100644
38310--- a/drivers/mfd/twl4030-irq.c
38311+++ b/drivers/mfd/twl4030-irq.c
38312@@ -35,6 +35,7 @@
38313 #include <linux/of.h>
38314 #include <linux/irqdomain.h>
38315 #include <linux/i2c/twl.h>
38316+#include <asm/pgtable.h>
38317
38318 #include "twl-core.h"
38319
38320@@ -728,10 +729,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
38321 * Install an irq handler for each of the SIH modules;
38322 * clone dummy irq_chip since PIH can't *do* anything
38323 */
38324- twl4030_irq_chip = dummy_irq_chip;
38325- twl4030_irq_chip.name = "twl4030";
38326+ pax_open_kernel();
38327+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
38328+ *(const char **)&twl4030_irq_chip.name = "twl4030";
38329
38330- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
38331+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
38332+ pax_close_kernel();
38333
38334 for (i = irq_base; i < irq_end; i++) {
38335 irq_set_chip_and_handler(i, &twl4030_irq_chip,
38336diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
38337index 277a8db..0e0b754 100644
38338--- a/drivers/mfd/twl6030-irq.c
38339+++ b/drivers/mfd/twl6030-irq.c
38340@@ -387,10 +387,12 @@ int twl6030_init_irq(struct device *dev, int irq_num)
38341 * install an irq handler for each of the modules;
38342 * clone dummy irq_chip since PIH can't *do* anything
38343 */
38344- twl6030_irq_chip = dummy_irq_chip;
38345- twl6030_irq_chip.name = "twl6030";
38346- twl6030_irq_chip.irq_set_type = NULL;
38347- twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
38348+ pax_open_kernel();
38349+ memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
38350+ *(const char **)&twl6030_irq_chip.name = "twl6030";
38351+ *(void **)&twl6030_irq_chip.irq_set_type = NULL;
38352+ *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
38353+ pax_close_kernel();
38354
38355 for (i = irq_base; i < irq_end; i++) {
38356 irq_set_chip_and_handler(i, &twl6030_irq_chip,
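Under grsecurity these irq_chip structures are constified into read-only memory, so the one-time setup writes above must be bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection (on x86, by toggling CR0.WP). A user-space analogue of the open/write/close bracket using mprotect(), assuming a POSIX system:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    char *buf = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED)
        return 1;

    strcpy(buf, "initial");
    mprotect(buf, pagesz, PROT_READ);               /* normally read-only */

    mprotect(buf, pagesz, PROT_READ | PROT_WRITE);  /* "open" */
    strcpy(buf, "patched");                         /* the one legal write */
    mprotect(buf, pagesz, PROT_READ);               /* "close" */

    printf("%s\n", buf);
    munmap(buf, pagesz);
    return 0;
}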
38357diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
38358index f428d86..274c368 100644
38359--- a/drivers/misc/c2port/core.c
38360+++ b/drivers/misc/c2port/core.c
38361@@ -924,7 +924,9 @@ struct c2port_device *c2port_device_register(char *name,
38362 mutex_init(&c2dev->mutex);
38363
38364 /* Create binary file */
38365- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
38366+ pax_open_kernel();
38367+ *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
38368+ pax_close_kernel();
38369 ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
38370 if (unlikely(ret))
38371 goto error_device_create_bin_file;
38372diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
38373index 3aa9a96..59cf685 100644
38374--- a/drivers/misc/kgdbts.c
38375+++ b/drivers/misc/kgdbts.c
38376@@ -832,7 +832,7 @@ static void run_plant_and_detach_test(int is_early)
38377 char before[BREAK_INSTR_SIZE];
38378 char after[BREAK_INSTR_SIZE];
38379
38380- probe_kernel_read(before, (char *)kgdbts_break_test,
38381+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
38382 BREAK_INSTR_SIZE);
38383 init_simple_test();
38384 ts.tst = plant_and_detach_test;
38385@@ -840,7 +840,7 @@ static void run_plant_and_detach_test(int is_early)
38386 /* Activate test with initial breakpoint */
38387 if (!is_early)
38388 kgdb_breakpoint();
38389- probe_kernel_read(after, (char *)kgdbts_break_test,
38390+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
38391 BREAK_INSTR_SIZE);
38392 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
38393 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
38394diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
38395index 4a87e5c..76bdf5c 100644
38396--- a/drivers/misc/lis3lv02d/lis3lv02d.c
38397+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
38398@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
38399 * the lid is closed. This leads to interrupts as soon as a little move
38400 * is done.
38401 */
38402- atomic_inc(&lis3->count);
38403+ atomic_inc_unchecked(&lis3->count);
38404
38405 wake_up_interruptible(&lis3->misc_wait);
38406 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
38407@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
38408 if (lis3->pm_dev)
38409 pm_runtime_get_sync(lis3->pm_dev);
38410
38411- atomic_set(&lis3->count, 0);
38412+ atomic_set_unchecked(&lis3->count, 0);
38413 return 0;
38414 }
38415
38416@@ -617,7 +617,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
38417 add_wait_queue(&lis3->misc_wait, &wait);
38418 while (true) {
38419 set_current_state(TASK_INTERRUPTIBLE);
38420- data = atomic_xchg(&lis3->count, 0);
38421+ data = atomic_xchg_unchecked(&lis3->count, 0);
38422 if (data)
38423 break;
38424
38425@@ -658,7 +658,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
38426 struct lis3lv02d, miscdev);
38427
38428 poll_wait(file, &lis3->misc_wait, wait);
38429- if (atomic_read(&lis3->count))
38430+ if (atomic_read_unchecked(&lis3->count))
38431 return POLLIN | POLLRDNORM;
38432 return 0;
38433 }
38434diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
38435index c439c82..1f20f57 100644
38436--- a/drivers/misc/lis3lv02d/lis3lv02d.h
38437+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
38438@@ -297,7 +297,7 @@ struct lis3lv02d {
38439 struct input_polled_dev *idev; /* input device */
38440 struct platform_device *pdev; /* platform device */
38441 struct regulator_bulk_data regulators[2];
38442- atomic_t count; /* interrupt count after last read */
38443+ atomic_unchecked_t count; /* interrupt count after last read */
38444 union axis_conversion ac; /* hw -> logical axis */
38445 int mapped_btns[3];
38446
38447diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
38448index 2f30bad..c4c13d0 100644
38449--- a/drivers/misc/sgi-gru/gruhandles.c
38450+++ b/drivers/misc/sgi-gru/gruhandles.c
38451@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
38452 unsigned long nsec;
38453
38454 nsec = CLKS2NSEC(clks);
38455- atomic_long_inc(&mcs_op_statistics[op].count);
38456- atomic_long_add(nsec, &mcs_op_statistics[op].total);
38457+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
38458+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
38459 if (mcs_op_statistics[op].max < nsec)
38460 mcs_op_statistics[op].max = nsec;
38461 }
38462diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
38463index 950dbe9..eeef0f8 100644
38464--- a/drivers/misc/sgi-gru/gruprocfs.c
38465+++ b/drivers/misc/sgi-gru/gruprocfs.c
38466@@ -32,9 +32,9 @@
38467
38468 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
38469
38470-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
38471+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
38472 {
38473- unsigned long val = atomic_long_read(v);
38474+ unsigned long val = atomic_long_read_unchecked(v);
38475
38476 seq_printf(s, "%16lu %s\n", val, id);
38477 }
38478@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
38479
38480 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
38481 for (op = 0; op < mcsop_last; op++) {
38482- count = atomic_long_read(&mcs_op_statistics[op].count);
38483- total = atomic_long_read(&mcs_op_statistics[op].total);
38484+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
38485+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
38486 max = mcs_op_statistics[op].max;
38487 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
38488 count ? total / count : 0, max);
38489diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
38490index 5c3ce24..4915ccb 100644
38491--- a/drivers/misc/sgi-gru/grutables.h
38492+++ b/drivers/misc/sgi-gru/grutables.h
38493@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
38494 * GRU statistics.
38495 */
38496 struct gru_stats_s {
38497- atomic_long_t vdata_alloc;
38498- atomic_long_t vdata_free;
38499- atomic_long_t gts_alloc;
38500- atomic_long_t gts_free;
38501- atomic_long_t gms_alloc;
38502- atomic_long_t gms_free;
38503- atomic_long_t gts_double_allocate;
38504- atomic_long_t assign_context;
38505- atomic_long_t assign_context_failed;
38506- atomic_long_t free_context;
38507- atomic_long_t load_user_context;
38508- atomic_long_t load_kernel_context;
38509- atomic_long_t lock_kernel_context;
38510- atomic_long_t unlock_kernel_context;
38511- atomic_long_t steal_user_context;
38512- atomic_long_t steal_kernel_context;
38513- atomic_long_t steal_context_failed;
38514- atomic_long_t nopfn;
38515- atomic_long_t asid_new;
38516- atomic_long_t asid_next;
38517- atomic_long_t asid_wrap;
38518- atomic_long_t asid_reuse;
38519- atomic_long_t intr;
38520- atomic_long_t intr_cbr;
38521- atomic_long_t intr_tfh;
38522- atomic_long_t intr_spurious;
38523- atomic_long_t intr_mm_lock_failed;
38524- atomic_long_t call_os;
38525- atomic_long_t call_os_wait_queue;
38526- atomic_long_t user_flush_tlb;
38527- atomic_long_t user_unload_context;
38528- atomic_long_t user_exception;
38529- atomic_long_t set_context_option;
38530- atomic_long_t check_context_retarget_intr;
38531- atomic_long_t check_context_unload;
38532- atomic_long_t tlb_dropin;
38533- atomic_long_t tlb_preload_page;
38534- atomic_long_t tlb_dropin_fail_no_asid;
38535- atomic_long_t tlb_dropin_fail_upm;
38536- atomic_long_t tlb_dropin_fail_invalid;
38537- atomic_long_t tlb_dropin_fail_range_active;
38538- atomic_long_t tlb_dropin_fail_idle;
38539- atomic_long_t tlb_dropin_fail_fmm;
38540- atomic_long_t tlb_dropin_fail_no_exception;
38541- atomic_long_t tfh_stale_on_fault;
38542- atomic_long_t mmu_invalidate_range;
38543- atomic_long_t mmu_invalidate_page;
38544- atomic_long_t flush_tlb;
38545- atomic_long_t flush_tlb_gru;
38546- atomic_long_t flush_tlb_gru_tgh;
38547- atomic_long_t flush_tlb_gru_zero_asid;
38548+ atomic_long_unchecked_t vdata_alloc;
38549+ atomic_long_unchecked_t vdata_free;
38550+ atomic_long_unchecked_t gts_alloc;
38551+ atomic_long_unchecked_t gts_free;
38552+ atomic_long_unchecked_t gms_alloc;
38553+ atomic_long_unchecked_t gms_free;
38554+ atomic_long_unchecked_t gts_double_allocate;
38555+ atomic_long_unchecked_t assign_context;
38556+ atomic_long_unchecked_t assign_context_failed;
38557+ atomic_long_unchecked_t free_context;
38558+ atomic_long_unchecked_t load_user_context;
38559+ atomic_long_unchecked_t load_kernel_context;
38560+ atomic_long_unchecked_t lock_kernel_context;
38561+ atomic_long_unchecked_t unlock_kernel_context;
38562+ atomic_long_unchecked_t steal_user_context;
38563+ atomic_long_unchecked_t steal_kernel_context;
38564+ atomic_long_unchecked_t steal_context_failed;
38565+ atomic_long_unchecked_t nopfn;
38566+ atomic_long_unchecked_t asid_new;
38567+ atomic_long_unchecked_t asid_next;
38568+ atomic_long_unchecked_t asid_wrap;
38569+ atomic_long_unchecked_t asid_reuse;
38570+ atomic_long_unchecked_t intr;
38571+ atomic_long_unchecked_t intr_cbr;
38572+ atomic_long_unchecked_t intr_tfh;
38573+ atomic_long_unchecked_t intr_spurious;
38574+ atomic_long_unchecked_t intr_mm_lock_failed;
38575+ atomic_long_unchecked_t call_os;
38576+ atomic_long_unchecked_t call_os_wait_queue;
38577+ atomic_long_unchecked_t user_flush_tlb;
38578+ atomic_long_unchecked_t user_unload_context;
38579+ atomic_long_unchecked_t user_exception;
38580+ atomic_long_unchecked_t set_context_option;
38581+ atomic_long_unchecked_t check_context_retarget_intr;
38582+ atomic_long_unchecked_t check_context_unload;
38583+ atomic_long_unchecked_t tlb_dropin;
38584+ atomic_long_unchecked_t tlb_preload_page;
38585+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
38586+ atomic_long_unchecked_t tlb_dropin_fail_upm;
38587+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
38588+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
38589+ atomic_long_unchecked_t tlb_dropin_fail_idle;
38590+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
38591+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
38592+ atomic_long_unchecked_t tfh_stale_on_fault;
38593+ atomic_long_unchecked_t mmu_invalidate_range;
38594+ atomic_long_unchecked_t mmu_invalidate_page;
38595+ atomic_long_unchecked_t flush_tlb;
38596+ atomic_long_unchecked_t flush_tlb_gru;
38597+ atomic_long_unchecked_t flush_tlb_gru_tgh;
38598+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
38599
38600- atomic_long_t copy_gpa;
38601- atomic_long_t read_gpa;
38602+ atomic_long_unchecked_t copy_gpa;
38603+ atomic_long_unchecked_t read_gpa;
38604
38605- atomic_long_t mesq_receive;
38606- atomic_long_t mesq_receive_none;
38607- atomic_long_t mesq_send;
38608- atomic_long_t mesq_send_failed;
38609- atomic_long_t mesq_noop;
38610- atomic_long_t mesq_send_unexpected_error;
38611- atomic_long_t mesq_send_lb_overflow;
38612- atomic_long_t mesq_send_qlimit_reached;
38613- atomic_long_t mesq_send_amo_nacked;
38614- atomic_long_t mesq_send_put_nacked;
38615- atomic_long_t mesq_page_overflow;
38616- atomic_long_t mesq_qf_locked;
38617- atomic_long_t mesq_qf_noop_not_full;
38618- atomic_long_t mesq_qf_switch_head_failed;
38619- atomic_long_t mesq_qf_unexpected_error;
38620- atomic_long_t mesq_noop_unexpected_error;
38621- atomic_long_t mesq_noop_lb_overflow;
38622- atomic_long_t mesq_noop_qlimit_reached;
38623- atomic_long_t mesq_noop_amo_nacked;
38624- atomic_long_t mesq_noop_put_nacked;
38625- atomic_long_t mesq_noop_page_overflow;
38626+ atomic_long_unchecked_t mesq_receive;
38627+ atomic_long_unchecked_t mesq_receive_none;
38628+ atomic_long_unchecked_t mesq_send;
38629+ atomic_long_unchecked_t mesq_send_failed;
38630+ atomic_long_unchecked_t mesq_noop;
38631+ atomic_long_unchecked_t mesq_send_unexpected_error;
38632+ atomic_long_unchecked_t mesq_send_lb_overflow;
38633+ atomic_long_unchecked_t mesq_send_qlimit_reached;
38634+ atomic_long_unchecked_t mesq_send_amo_nacked;
38635+ atomic_long_unchecked_t mesq_send_put_nacked;
38636+ atomic_long_unchecked_t mesq_page_overflow;
38637+ atomic_long_unchecked_t mesq_qf_locked;
38638+ atomic_long_unchecked_t mesq_qf_noop_not_full;
38639+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
38640+ atomic_long_unchecked_t mesq_qf_unexpected_error;
38641+ atomic_long_unchecked_t mesq_noop_unexpected_error;
38642+ atomic_long_unchecked_t mesq_noop_lb_overflow;
38643+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
38644+ atomic_long_unchecked_t mesq_noop_amo_nacked;
38645+ atomic_long_unchecked_t mesq_noop_put_nacked;
38646+ atomic_long_unchecked_t mesq_noop_page_overflow;
38647
38648 };
38649
38650@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
38651 tghop_invalidate, mcsop_last};
38652
38653 struct mcs_op_statistic {
38654- atomic_long_t count;
38655- atomic_long_t total;
38656+ atomic_long_unchecked_t count;
38657+ atomic_long_unchecked_t total;
38658 unsigned long max;
38659 };
38660
38661@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
38662
38663 #define STAT(id) do { \
38664 if (gru_options & OPT_STATS) \
38665- atomic_long_inc(&gru_stats.id); \
38666+ atomic_long_inc_unchecked(&gru_stats.id); \
38667 } while (0)
38668
38669 #ifdef CONFIG_SGI_GRU_DEBUG
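The sgi-gru counters above are pure statistics, so the patch moves them to atomic_long_unchecked_t: PaX's REFCOUNT hardening makes ordinary atomic increments trap on overflow, and the _unchecked variants mark counters where wraparound is harmless and a trap would be a false positive. A C11-atomics sketch of such a relaxed statistics counter:

#include <stdatomic.h>
#include <stdio.h>

/* A counter where wraparound is benign, modelled with a C11 atomic;
 * the "_unchecked" kernel types mark exactly these so the REFCOUNT
 * overflow trap skips them. */
static atomic_ulong vdata_alloc = 0;

static void stat_inc(atomic_ulong *v)
{
    atomic_fetch_add_explicit(v, 1, memory_order_relaxed);
}

int main(void)
{
    for (int i = 0; i < 5; i++)
        stat_inc(&vdata_alloc);
    printf("vdata_alloc = %lu\n",
           atomic_load_explicit(&vdata_alloc, memory_order_relaxed));
    return 0;
}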
38670diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
38671index c862cd4..0d176fe 100644
38672--- a/drivers/misc/sgi-xp/xp.h
38673+++ b/drivers/misc/sgi-xp/xp.h
38674@@ -288,7 +288,7 @@ struct xpc_interface {
38675 xpc_notify_func, void *);
38676 void (*received) (short, int, void *);
38677 enum xp_retval (*partid_to_nasids) (short, void *);
38678-};
38679+} __no_const;
38680
38681 extern struct xpc_interface xpc_interface;
38682
38683diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
38684index b94d5f7..7f494c5 100644
38685--- a/drivers/misc/sgi-xp/xpc.h
38686+++ b/drivers/misc/sgi-xp/xpc.h
38687@@ -835,6 +835,7 @@ struct xpc_arch_operations {
38688 void (*received_payload) (struct xpc_channel *, void *);
38689 void (*notify_senders_of_disconnect) (struct xpc_channel *);
38690 };
38691+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
38692
38693 /* struct xpc_partition act_state values (for XPC HB) */
38694
38695@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
38696 /* found in xpc_main.c */
38697 extern struct device *xpc_part;
38698 extern struct device *xpc_chan;
38699-extern struct xpc_arch_operations xpc_arch_ops;
38700+extern xpc_arch_operations_no_const xpc_arch_ops;
38701 extern int xpc_disengage_timelimit;
38702 extern int xpc_disengage_timedout;
38703 extern int xpc_activate_IRQ_rcvd;
38704diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
38705index d971817..33bdca5 100644
38706--- a/drivers/misc/sgi-xp/xpc_main.c
38707+++ b/drivers/misc/sgi-xp/xpc_main.c
38708@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
38709 .notifier_call = xpc_system_die,
38710 };
38711
38712-struct xpc_arch_operations xpc_arch_ops;
38713+xpc_arch_operations_no_const xpc_arch_ops;
38714
38715 /*
38716 * Timer function to enforce the timelimit on the partition disengage.
38717@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
38718
38719 if (((die_args->trapnr == X86_TRAP_MF) ||
38720 (die_args->trapnr == X86_TRAP_XF)) &&
38721- !user_mode_vm(die_args->regs))
38722+ !user_mode(die_args->regs))
38723 xpc_die_deactivate();
38724
38725 break;
38726diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
38727index 6d8f701..35b6369 100644
38728--- a/drivers/mmc/core/mmc_ops.c
38729+++ b/drivers/mmc/core/mmc_ops.c
38730@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
38731 void *data_buf;
38732 int is_on_stack;
38733
38734- is_on_stack = object_is_on_stack(buf);
38735+ is_on_stack = object_starts_on_stack(buf);
38736 if (is_on_stack) {
38737 /*
38738 * dma onto stack is unsafe/nonportable, but callers to this
38739diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
38740index 53b8fd9..615b462 100644
38741--- a/drivers/mmc/host/dw_mmc.h
38742+++ b/drivers/mmc/host/dw_mmc.h
38743@@ -205,5 +205,5 @@ struct dw_mci_drv_data {
38744 int (*parse_dt)(struct dw_mci *host);
38745 int (*setup_bus)(struct dw_mci *host,
38746 struct device_node *slot_np, u8 bus_width);
38747-};
38748+} __do_const;
38749 #endif /* _DW_MMC_H_ */
38750diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
38751index 82a8de1..3c56ccb 100644
38752--- a/drivers/mmc/host/sdhci-s3c.c
38753+++ b/drivers/mmc/host/sdhci-s3c.c
38754@@ -721,9 +721,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
38755 * we can use overriding functions instead of default.
38756 */
38757 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
38758- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
38759- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
38760- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
38761+ pax_open_kernel();
38762+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
38763+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
38764+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
38765+ pax_close_kernel();
38766 }
38767
38768 /* It supports additional host capabilities if needed */
38769diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
38770index a4eb8b5..8c0628f 100644
38771--- a/drivers/mtd/devices/doc2000.c
38772+++ b/drivers/mtd/devices/doc2000.c
38773@@ -753,7 +753,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
38774
38775 /* The ECC will not be calculated correctly if less than 512 is written */
38776 /* DBB-
38777- if (len != 0x200 && eccbuf)
38778+ if (len != 0x200)
38779 printk(KERN_WARNING
38780 "ECC needs a full sector write (adr: %lx size %lx)\n",
38781 (long) to, (long) len);
38782diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
38783index 0c8bb6b..6f35deb 100644
38784--- a/drivers/mtd/nand/denali.c
38785+++ b/drivers/mtd/nand/denali.c
38786@@ -24,6 +24,7 @@
38787 #include <linux/slab.h>
38788 #include <linux/mtd/mtd.h>
38789 #include <linux/module.h>
38790+#include <linux/slab.h>
38791
38792 #include "denali.h"
38793
38794diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
38795index 51b9d6a..52af9a7 100644
38796--- a/drivers/mtd/nftlmount.c
38797+++ b/drivers/mtd/nftlmount.c
38798@@ -24,6 +24,7 @@
38799 #include <asm/errno.h>
38800 #include <linux/delay.h>
38801 #include <linux/slab.h>
38802+#include <linux/sched.h>
38803 #include <linux/mtd/mtd.h>
38804 #include <linux/mtd/nand.h>
38805 #include <linux/mtd/nftl.h>
38806diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
38807index 8dd6ba5..419cc1d 100644
38808--- a/drivers/mtd/sm_ftl.c
38809+++ b/drivers/mtd/sm_ftl.c
38810@@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
38811 #define SM_CIS_VENDOR_OFFSET 0x59
38812 struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
38813 {
38814- struct attribute_group *attr_group;
38815+ attribute_group_no_const *attr_group;
38816 struct attribute **attributes;
38817 struct sm_sysfs_attribute *vendor_attribute;
38818
38819diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
38820index b7d45f3..b5c89d9 100644
38821--- a/drivers/net/bonding/bond_main.c
38822+++ b/drivers/net/bonding/bond_main.c
38823@@ -4861,7 +4861,7 @@ static unsigned int bond_get_num_tx_queues(void)
38824 return tx_queues;
38825 }
38826
38827-static struct rtnl_link_ops bond_link_ops __read_mostly = {
38828+static struct rtnl_link_ops bond_link_ops = {
38829 .kind = "bond",
38830 .priv_size = sizeof(struct bonding),
38831 .setup = bond_setup,
38832diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
38833index 70dba5d..11a0919 100644
38834--- a/drivers/net/ethernet/8390/ax88796.c
38835+++ b/drivers/net/ethernet/8390/ax88796.c
38836@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
38837 if (ax->plat->reg_offsets)
38838 ei_local->reg_offset = ax->plat->reg_offsets;
38839 else {
38840+ resource_size_t _mem_size = mem_size;
38841+ do_div(_mem_size, 0x18);
38842 ei_local->reg_offset = ax->reg_offsets;
38843 for (ret = 0; ret < 0x18; ret++)
38844- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
38845+ ax->reg_offsets[ret] = _mem_size * ret;
38846 }
38847
38848 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
38849diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
38850index 0991534..8098e92 100644
38851--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
38852+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
38853@@ -1094,7 +1094,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
38854 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
38855 {
38856 /* RX_MODE controlling object */
38857- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
38858+ bnx2x_init_rx_mode_obj(bp);
38859
38860 /* multicast configuration controlling object */
38861 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
38862diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
38863index 09b625e..15b16fe 100644
38864--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
38865+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
38866@@ -2375,15 +2375,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
38867 return rc;
38868 }
38869
38870-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
38871- struct bnx2x_rx_mode_obj *o)
38872+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
38873 {
38874 if (CHIP_IS_E1x(bp)) {
38875- o->wait_comp = bnx2x_empty_rx_mode_wait;
38876- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
38877+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
38878+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
38879 } else {
38880- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
38881- o->config_rx_mode = bnx2x_set_rx_mode_e2;
38882+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
38883+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
38884 }
38885 }
38886
38887diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
38888index adbd91b..58ec94a 100644
38889--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
38890+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
38891@@ -1293,8 +1293,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
38892
38893 /********************* RX MODE ****************/
38894
38895-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
38896- struct bnx2x_rx_mode_obj *o);
38897+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
38898
38899 /**
38900 * bnx2x_config_rx_mode - Send an RX_MODE ramrod according to the provided parameters.
38901diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
38902index d330e81..ce1fb9a 100644
38903--- a/drivers/net/ethernet/broadcom/tg3.h
38904+++ b/drivers/net/ethernet/broadcom/tg3.h
38905@@ -146,6 +146,7 @@
38906 #define CHIPREV_ID_5750_A0 0x4000
38907 #define CHIPREV_ID_5750_A1 0x4001
38908 #define CHIPREV_ID_5750_A3 0x4003
38909+#define CHIPREV_ID_5750_C1 0x4201
38910 #define CHIPREV_ID_5750_C2 0x4202
38911 #define CHIPREV_ID_5752_A0_HW 0x5000
38912 #define CHIPREV_ID_5752_A0 0x6000
38913diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
38914index 8cffcdf..aadf043 100644
38915--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
38916+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
38917@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
38918 */
38919 struct l2t_skb_cb {
38920 arp_failure_handler_func arp_failure_handler;
38921-};
38922+} __no_const;
38923
38924 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
38925
38926diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
38927index 4c83003..2a2a5b9 100644
38928--- a/drivers/net/ethernet/dec/tulip/de4x5.c
38929+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
38930@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38931 for (i=0; i<ETH_ALEN; i++) {
38932 tmp.addr[i] = dev->dev_addr[i];
38933 }
38934- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
38935+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
38936 break;
38937
38938 case DE4X5_SET_HWADDR: /* Set the hardware address */
38939@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38940 spin_lock_irqsave(&lp->lock, flags);
38941 memcpy(&statbuf, &lp->pktStats, ioc->len);
38942 spin_unlock_irqrestore(&lp->lock, flags);
38943- if (copy_to_user(ioc->data, &statbuf, ioc->len))
38944+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
38945 return -EFAULT;
38946 break;
38947 }
38948diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
38949index 4d6f3c5..6169e60 100644
38950--- a/drivers/net/ethernet/emulex/benet/be_main.c
38951+++ b/drivers/net/ethernet/emulex/benet/be_main.c
38952@@ -455,7 +455,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
38953
38954 if (wrapped)
38955 newacc += 65536;
38956- ACCESS_ONCE(*acc) = newacc;
38957+ ACCESS_ONCE_RW(*acc) = newacc;
38958 }
38959
38960 void be_parse_stats(struct be_adapter *adapter)
38961diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
38962index 74d749e..eefb1bd 100644
38963--- a/drivers/net/ethernet/faraday/ftgmac100.c
38964+++ b/drivers/net/ethernet/faraday/ftgmac100.c
38965@@ -31,6 +31,8 @@
38966 #include <linux/netdevice.h>
38967 #include <linux/phy.h>
38968 #include <linux/platform_device.h>
38969+#include <linux/interrupt.h>
38970+#include <linux/irqreturn.h>
38971 #include <net/ip.h>
38972
38973 #include "ftgmac100.h"
38974diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
38975index b901a01..1ff32ee 100644
38976--- a/drivers/net/ethernet/faraday/ftmac100.c
38977+++ b/drivers/net/ethernet/faraday/ftmac100.c
38978@@ -31,6 +31,8 @@
38979 #include <linux/module.h>
38980 #include <linux/netdevice.h>
38981 #include <linux/platform_device.h>
38982+#include <linux/interrupt.h>
38983+#include <linux/irqreturn.h>
38984
38985 #include "ftmac100.h"
38986
38987diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
38988index bb9256a..56d8752 100644
38989--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
38990+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
38991@@ -806,7 +806,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
38992 }
38993
38994 /* update the base incval used to calculate frequency adjustment */
38995- ACCESS_ONCE(adapter->base_incval) = incval;
38996+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
38997 smp_mb();
38998
38999 /* need lock to prevent incorrect read while modifying cyclecounter */
39000diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
39001index fbe5363..266b4e3 100644
39002--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
39003+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
39004@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
39005 struct __vxge_hw_fifo *fifo;
39006 struct vxge_hw_fifo_config *config;
39007 u32 txdl_size, txdl_per_memblock;
39008- struct vxge_hw_mempool_cbs fifo_mp_callback;
39009+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
39010+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
39011+ };
39012+
39013 struct __vxge_hw_virtualpath *vpath;
39014
39015 if ((vp == NULL) || (attr == NULL)) {
39016@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
39017 goto exit;
39018 }
39019
39020- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
39021-
39022 fifo->mempool =
39023 __vxge_hw_mempool_create(vpath->hldev,
39024 fifo->config->memblock_size,
39025diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
39026index 998974f..ecd26db 100644
39027--- a/drivers/net/ethernet/realtek/r8169.c
39028+++ b/drivers/net/ethernet/realtek/r8169.c
39029@@ -741,22 +741,22 @@ struct rtl8169_private {
39030 struct mdio_ops {
39031 void (*write)(struct rtl8169_private *, int, int);
39032 int (*read)(struct rtl8169_private *, int);
39033- } mdio_ops;
39034+ } __no_const mdio_ops;
39035
39036 struct pll_power_ops {
39037 void (*down)(struct rtl8169_private *);
39038 void (*up)(struct rtl8169_private *);
39039- } pll_power_ops;
39040+ } __no_const pll_power_ops;
39041
39042 struct jumbo_ops {
39043 void (*enable)(struct rtl8169_private *);
39044 void (*disable)(struct rtl8169_private *);
39045- } jumbo_ops;
39046+ } __no_const jumbo_ops;
39047
39048 struct csi_ops {
39049 void (*write)(struct rtl8169_private *, int, int);
39050 u32 (*read)(struct rtl8169_private *, int);
39051- } csi_ops;
39052+ } __no_const csi_ops;
39053
39054 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
39055 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
39056diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
39057index 0767043f..08c2553 100644
39058--- a/drivers/net/ethernet/sfc/ptp.c
39059+++ b/drivers/net/ethernet/sfc/ptp.c
39060@@ -553,7 +553,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
39061 (u32)((u64)ptp->start.dma_addr >> 32));
39062
39063 /* Clear flag that signals MC ready */
39064- ACCESS_ONCE(*start) = 0;
39065+ ACCESS_ONCE_RW(*start) = 0;
39066 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
39067 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
39068
39069diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39070index 0c74a70..3bc6f68 100644
39071--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39072+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
39073@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
39074
39075 writel(value, ioaddr + MMC_CNTRL);
39076
39077- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
39078- MMC_CNTRL, value);
39079+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
39080+// MMC_CNTRL, value);
39081 }
39082
39083 /* To mask all interrupts. */
39084diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
39085index e6fe0d8..2b7d752 100644
39086--- a/drivers/net/hyperv/hyperv_net.h
39087+++ b/drivers/net/hyperv/hyperv_net.h
39088@@ -101,7 +101,7 @@ struct rndis_device {
39089
39090 enum rndis_device_state state;
39091 bool link_state;
39092- atomic_t new_req_id;
39093+ atomic_unchecked_t new_req_id;
39094
39095 spinlock_t request_lock;
39096 struct list_head req_list;
39097diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
39098index 2b657d4..9903bc0 100644
39099--- a/drivers/net/hyperv/rndis_filter.c
39100+++ b/drivers/net/hyperv/rndis_filter.c
39101@@ -107,7 +107,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
39102 * template
39103 */
39104 set = &rndis_msg->msg.set_req;
39105- set->req_id = atomic_inc_return(&dev->new_req_id);
39106+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
39107
39108 /* Add to the request list */
39109 spin_lock_irqsave(&dev->request_lock, flags);
39110@@ -758,7 +758,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
39111
39112 /* Setup the rndis set */
39113 halt = &request->request_msg.msg.halt_req;
39114- halt->req_id = atomic_inc_return(&dev->new_req_id);
39115+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
39116
39117 /* Ignore return since this msg is optional. */
39118 rndis_filter_send_request(dev, request);
39119diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
39120index 1e9cb0b..7839125 100644
39121--- a/drivers/net/ieee802154/fakehard.c
39122+++ b/drivers/net/ieee802154/fakehard.c
39123@@ -386,7 +386,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
39124 phy->transmit_power = 0xbf;
39125
39126 dev->netdev_ops = &fake_ops;
39127- dev->ml_priv = &fake_mlme;
39128+ dev->ml_priv = (void *)&fake_mlme;
39129
39130 priv = netdev_priv(dev);
39131 priv->phy = phy;
39132diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
39133index d3fb97d..19520c7 100644
39134--- a/drivers/net/macvlan.c
39135+++ b/drivers/net/macvlan.c
39136@@ -851,13 +851,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
39137 int macvlan_link_register(struct rtnl_link_ops *ops)
39138 {
39139 /* common fields */
39140- ops->priv_size = sizeof(struct macvlan_dev);
39141- ops->validate = macvlan_validate;
39142- ops->maxtype = IFLA_MACVLAN_MAX;
39143- ops->policy = macvlan_policy;
39144- ops->changelink = macvlan_changelink;
39145- ops->get_size = macvlan_get_size;
39146- ops->fill_info = macvlan_fill_info;
39147+ pax_open_kernel();
39148+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
39149+ *(void **)&ops->validate = macvlan_validate;
39150+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
39151+ *(const void **)&ops->policy = macvlan_policy;
39152+ *(void **)&ops->changelink = macvlan_changelink;
39153+ *(void **)&ops->get_size = macvlan_get_size;
39154+ *(void **)&ops->fill_info = macvlan_fill_info;
39155+ pax_close_kernel();
39156
39157 return rtnl_link_register(ops);
39158 };
39159@@ -913,7 +915,7 @@ static int macvlan_device_event(struct notifier_block *unused,
39160 return NOTIFY_DONE;
39161 }
39162
39163-static struct notifier_block macvlan_notifier_block __read_mostly = {
39164+static struct notifier_block macvlan_notifier_block = {
39165 .notifier_call = macvlan_device_event,
39166 };
39167
39168diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
39169index 0f0f9ce..0ca5819 100644
39170--- a/drivers/net/macvtap.c
39171+++ b/drivers/net/macvtap.c
39172@@ -1100,7 +1100,7 @@ static int macvtap_device_event(struct notifier_block *unused,
39173 return NOTIFY_DONE;
39174 }
39175
39176-static struct notifier_block macvtap_notifier_block __read_mostly = {
39177+static struct notifier_block macvtap_notifier_block = {
39178 .notifier_call = macvtap_device_event,
39179 };
39180
39181diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
39182index daec9b0..6428fcb 100644
39183--- a/drivers/net/phy/mdio-bitbang.c
39184+++ b/drivers/net/phy/mdio-bitbang.c
39185@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
39186 struct mdiobb_ctrl *ctrl = bus->priv;
39187
39188 module_put(ctrl->ops->owner);
39189+ mdiobus_unregister(bus);
39190 mdiobus_free(bus);
39191 }
39192 EXPORT_SYMBOL(free_mdio_bitbang);
39193diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
39194index 508570e..f706dc7 100644
39195--- a/drivers/net/ppp/ppp_generic.c
39196+++ b/drivers/net/ppp/ppp_generic.c
39197@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39198 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
39199 struct ppp_stats stats;
39200 struct ppp_comp_stats cstats;
39201- char *vers;
39202
39203 switch (cmd) {
39204 case SIOCGPPPSTATS:
39205@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
39206 break;
39207
39208 case SIOCGPPPVER:
39209- vers = PPP_VERSION;
39210- if (copy_to_user(addr, vers, strlen(vers) + 1))
39211+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
39212 break;
39213 err = 0;
39214 break;
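
The SIOCGPPPVER hunk replaces strlen() on the PPP_VERSION literal with sizeof, which for a string literal is a compile-time constant that already counts the terminating NUL, making the vers temporary unnecessary. In isolation:

    #define PPP_VERSION "2.4.2"
    /* sizeof counts the NUL, strlen does not: */
    char out[sizeof(PPP_VERSION)];                 /* 6 bytes */
    memcpy(out, PPP_VERSION, sizeof(PPP_VERSION)); /* copies "2.4.2\0" */
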
39215diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
39216index ad86660..9fd0884 100644
39217--- a/drivers/net/team/team.c
39218+++ b/drivers/net/team/team.c
39219@@ -2601,7 +2601,7 @@ static int team_device_event(struct notifier_block *unused,
39220 return NOTIFY_DONE;
39221 }
39222
39223-static struct notifier_block team_notifier_block __read_mostly = {
39224+static struct notifier_block team_notifier_block = {
39225 .notifier_call = team_device_event,
39226 };
39227
39228diff --git a/drivers/net/tun.c b/drivers/net/tun.c
39229index 2917a86..edd463f 100644
39230--- a/drivers/net/tun.c
39231+++ b/drivers/net/tun.c
39232@@ -1836,7 +1836,7 @@ unlock:
39233 }
39234
39235 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
39236- unsigned long arg, int ifreq_len)
39237+ unsigned long arg, size_t ifreq_len)
39238 {
39239 struct tun_file *tfile = file->private_data;
39240 struct tun_struct *tun;
39241@@ -1848,6 +1848,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
39242 int vnet_hdr_sz;
39243 int ret;
39244
39245+ if (ifreq_len > sizeof ifr)
39246+ return -EFAULT;
39247+
39248 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
39249 if (copy_from_user(&ifr, argp, ifreq_len))
39250 return -EFAULT;
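
__tun_chr_ioctl() now takes ifreq_len as size_t and rejects any length larger than the on-stack ifr before copy_from_user() runs, turning an implicit trusted-caller assumption into an explicit bound. The guard pattern, in isolation:

    struct ifreq ifr;

    if (ifreq_len > sizeof(ifr))            /* refuse oversized copies */
            return -EFAULT;
    if (copy_from_user(&ifr, argp, ifreq_len))
            return -EFAULT;
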
39251diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39252index cd8ccb2..cff5144 100644
39253--- a/drivers/net/usb/hso.c
39254+++ b/drivers/net/usb/hso.c
39255@@ -71,7 +71,7 @@
39256 #include <asm/byteorder.h>
39257 #include <linux/serial_core.h>
39258 #include <linux/serial.h>
39259-
39260+#include <asm/local.h>
39261
39262 #define MOD_AUTHOR "Option Wireless"
39263 #define MOD_DESCRIPTION "USB High Speed Option driver"
39264@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
39265 struct urb *urb;
39266
39267 urb = serial->rx_urb[0];
39268- if (serial->port.count > 0) {
39269+ if (atomic_read(&serial->port.count) > 0) {
39270 count = put_rxbuf_data(urb, serial);
39271 if (count == -1)
39272 return;
39273@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
39274 DUMP1(urb->transfer_buffer, urb->actual_length);
39275
39276 /* Anyone listening? */
39277- if (serial->port.count == 0)
39278+ if (atomic_read(&serial->port.count) == 0)
39279 return;
39280
39281 if (status == 0) {
39282@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39283 tty_port_tty_set(&serial->port, tty);
39284
39285 /* check for port already opened, if not set the termios */
39286- serial->port.count++;
39287- if (serial->port.count == 1) {
39288+ if (atomic_inc_return(&serial->port.count) == 1) {
39289 serial->rx_state = RX_IDLE;
39290 /* Force default termio settings */
39291 _hso_serial_set_termios(tty, NULL);
39292@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39293 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
39294 if (result) {
39295 hso_stop_serial_device(serial->parent);
39296- serial->port.count--;
39297+ atomic_dec(&serial->port.count);
39298 kref_put(&serial->parent->ref, hso_serial_ref_free);
39299 }
39300 } else {
39301@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
39302
39303 /* reset the rts and dtr */
39304 /* do the actual close */
39305- serial->port.count--;
39306+ atomic_dec(&serial->port.count);
39307
39308- if (serial->port.count <= 0) {
39309- serial->port.count = 0;
39310+ if (atomic_read(&serial->port.count) <= 0) {
39311+ atomic_set(&serial->port.count, 0);
39312 tty_port_tty_set(&serial->port, NULL);
39313 if (!usb_gone)
39314 hso_stop_serial_device(serial->parent);
39315@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
39316
39317 /* the actual setup */
39318 spin_lock_irqsave(&serial->serial_lock, flags);
39319- if (serial->port.count)
39320+ if (atomic_read(&serial->port.count))
39321 _hso_serial_set_termios(tty, old);
39322 else
39323 tty->termios = *old;
39324@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
39325 D1("Pending read interrupt on port %d\n", i);
39326 spin_lock(&serial->serial_lock);
39327 if (serial->rx_state == RX_IDLE &&
39328- serial->port.count > 0) {
39329+ atomic_read(&serial->port.count) > 0) {
39330 /* Setup and send a ctrl req read on
39331 * port i */
39332 if (!serial->rx_urb_filled[0]) {
39333@@ -3079,7 +3078,7 @@ static int hso_resume(struct usb_interface *iface)
39334 /* Start all serial ports */
39335 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39336 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39337- if (dev2ser(serial_table[i])->port.count) {
39338+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
39339 result =
39340 hso_start_serial_device(serial_table[i], GFP_NOIO);
39341 hso_kick_transmit(dev2ser(serial_table[i]));
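
The hso hunks convert the tty port open count to an atomic type so the open/close/resume paths can update and read it without holding serial_lock everywhere. The open path leans on the inc-and-test idiom:

    /* Sketch: atomic_inc_return() makes first-opener detection race-free;
     * only the thread that observed 1 performs one-time setup.
     * first_open_setup() is a hypothetical stand-in for the real work. */
    if (atomic_inc_return(&serial->port.count) == 1)
            first_open_setup(serial);
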
39342diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
39343index 656230e..15525a8 100644
39344--- a/drivers/net/vxlan.c
39345+++ b/drivers/net/vxlan.c
39346@@ -1428,7 +1428,7 @@ nla_put_failure:
39347 return -EMSGSIZE;
39348 }
39349
39350-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
39351+static struct rtnl_link_ops vxlan_link_ops = {
39352 .kind = "vxlan",
39353 .maxtype = IFLA_VXLAN_MAX,
39354 .policy = vxlan_policy,
39355diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
39356index 8d78253..bebbb68 100644
39357--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
39358+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
39359@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39360 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
39361 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
39362
39363- ACCESS_ONCE(ads->ds_link) = i->link;
39364- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
39365+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
39366+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
39367
39368 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
39369 ctl6 = SM(i->keytype, AR_EncrType);
39370@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39371
39372 if ((i->is_first || i->is_last) &&
39373 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
39374- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
39375+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
39376 | set11nTries(i->rates, 1)
39377 | set11nTries(i->rates, 2)
39378 | set11nTries(i->rates, 3)
39379 | (i->dur_update ? AR_DurUpdateEna : 0)
39380 | SM(0, AR_BurstDur);
39381
39382- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
39383+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
39384 | set11nRate(i->rates, 1)
39385 | set11nRate(i->rates, 2)
39386 | set11nRate(i->rates, 3);
39387 } else {
39388- ACCESS_ONCE(ads->ds_ctl2) = 0;
39389- ACCESS_ONCE(ads->ds_ctl3) = 0;
39390+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
39391+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
39392 }
39393
39394 if (!i->is_first) {
39395- ACCESS_ONCE(ads->ds_ctl0) = 0;
39396- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
39397- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
39398+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
39399+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
39400+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
39401 return;
39402 }
39403
39404@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39405 break;
39406 }
39407
39408- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
39409+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
39410 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
39411 | SM(i->txpower, AR_XmitPower)
39412 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
39413@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39414 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
39415 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
39416
39417- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
39418- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
39419+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
39420+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
39421
39422 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
39423 return;
39424
39425- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
39426+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
39427 | set11nPktDurRTSCTS(i->rates, 1);
39428
39429- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
39430+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
39431 | set11nPktDurRTSCTS(i->rates, 3);
39432
39433- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
39434+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
39435 | set11nRateFlags(i->rates, 1)
39436 | set11nRateFlags(i->rates, 2)
39437 | set11nRateFlags(i->rates, 3)
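
The ACCESS_ONCE_RW() substitutions here and in ar9003_mac.c below fit a pattern where this patch presumably redefines ACCESS_ONCE() with a const-qualified cast, so stray writes through it fail to compile, and ACCESS_ONCE_RW() is the explicitly writable variant for descriptor fields like these. A sketch of the assumed definitions:

    #define ACCESS_ONCE(x)    (*(const volatile typeof(x) *)&(x)) /* read  */
    #define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))       /* write */
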
39438diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
39439index 301bf72..3f5654f 100644
39440--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
39441+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
39442@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39443 (i->qcu << AR_TxQcuNum_S) | desc_len;
39444
39445 checksum += val;
39446- ACCESS_ONCE(ads->info) = val;
39447+ ACCESS_ONCE_RW(ads->info) = val;
39448
39449 checksum += i->link;
39450- ACCESS_ONCE(ads->link) = i->link;
39451+ ACCESS_ONCE_RW(ads->link) = i->link;
39452
39453 checksum += i->buf_addr[0];
39454- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
39455+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
39456 checksum += i->buf_addr[1];
39457- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
39458+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
39459 checksum += i->buf_addr[2];
39460- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
39461+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
39462 checksum += i->buf_addr[3];
39463- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
39464+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
39465
39466 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
39467- ACCESS_ONCE(ads->ctl3) = val;
39468+ ACCESS_ONCE_RW(ads->ctl3) = val;
39469 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
39470- ACCESS_ONCE(ads->ctl5) = val;
39471+ ACCESS_ONCE_RW(ads->ctl5) = val;
39472 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
39473- ACCESS_ONCE(ads->ctl7) = val;
39474+ ACCESS_ONCE_RW(ads->ctl7) = val;
39475 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
39476- ACCESS_ONCE(ads->ctl9) = val;
39477+ ACCESS_ONCE_RW(ads->ctl9) = val;
39478
39479 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
39480- ACCESS_ONCE(ads->ctl10) = checksum;
39481+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
39482
39483 if (i->is_first || i->is_last) {
39484- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
39485+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
39486 | set11nTries(i->rates, 1)
39487 | set11nTries(i->rates, 2)
39488 | set11nTries(i->rates, 3)
39489 | (i->dur_update ? AR_DurUpdateEna : 0)
39490 | SM(0, AR_BurstDur);
39491
39492- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
39493+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
39494 | set11nRate(i->rates, 1)
39495 | set11nRate(i->rates, 2)
39496 | set11nRate(i->rates, 3);
39497 } else {
39498- ACCESS_ONCE(ads->ctl13) = 0;
39499- ACCESS_ONCE(ads->ctl14) = 0;
39500+ ACCESS_ONCE_RW(ads->ctl13) = 0;
39501+ ACCESS_ONCE_RW(ads->ctl14) = 0;
39502 }
39503
39504 ads->ctl20 = 0;
39505@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39506
39507 ctl17 = SM(i->keytype, AR_EncrType);
39508 if (!i->is_first) {
39509- ACCESS_ONCE(ads->ctl11) = 0;
39510- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
39511- ACCESS_ONCE(ads->ctl15) = 0;
39512- ACCESS_ONCE(ads->ctl16) = 0;
39513- ACCESS_ONCE(ads->ctl17) = ctl17;
39514- ACCESS_ONCE(ads->ctl18) = 0;
39515- ACCESS_ONCE(ads->ctl19) = 0;
39516+ ACCESS_ONCE_RW(ads->ctl11) = 0;
39517+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
39518+ ACCESS_ONCE_RW(ads->ctl15) = 0;
39519+ ACCESS_ONCE_RW(ads->ctl16) = 0;
39520+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
39521+ ACCESS_ONCE_RW(ads->ctl18) = 0;
39522+ ACCESS_ONCE_RW(ads->ctl19) = 0;
39523 return;
39524 }
39525
39526- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
39527+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
39528 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
39529 | SM(i->txpower, AR_XmitPower)
39530 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
39531@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
39532 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
39533 ctl12 |= SM(val, AR_PAPRDChainMask);
39534
39535- ACCESS_ONCE(ads->ctl12) = ctl12;
39536- ACCESS_ONCE(ads->ctl17) = ctl17;
39537+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
39538+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
39539
39540- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
39541+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
39542 | set11nPktDurRTSCTS(i->rates, 1);
39543
39544- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
39545+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
39546 | set11nPktDurRTSCTS(i->rates, 3);
39547
39548- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
39549+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
39550 | set11nRateFlags(i->rates, 1)
39551 | set11nRateFlags(i->rates, 2)
39552 | set11nRateFlags(i->rates, 3)
39553 | SM(i->rtscts_rate, AR_RTSCTSRate);
39554
39555- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
39556+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
39557 }
39558
39559 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
39560diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
39561index 9d26fc5..60d9f14 100644
39562--- a/drivers/net/wireless/ath/ath9k/hw.h
39563+++ b/drivers/net/wireless/ath/ath9k/hw.h
39564@@ -658,7 +658,7 @@ struct ath_hw_private_ops {
39565
39566 /* ANI */
39567 void (*ani_cache_ini_regs)(struct ath_hw *ah);
39568-};
39569+} __no_const;
39570
39571 /**
39572 * struct ath_hw_ops - callbacks used by hardware code and driver code
39573@@ -688,7 +688,7 @@ struct ath_hw_ops {
39574 void (*antdiv_comb_conf_set)(struct ath_hw *ah,
39575 struct ath_hw_antcomb_conf *antconf);
39576 void (*antctrl_shared_chain_lnadiv)(struct ath_hw *hw, bool enable);
39577-};
39578+} __no_const;
39579
39580 struct ath_nf_limits {
39581 s16 max;
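
Tagging ath_hw_private_ops and ath_hw_ops with __no_const exempts them from the constify plugin: these callback tables are legitimately repointed at run time (per-chip hooks), so forcing them const would break the driver. A sketch of how the annotation is presumably wired up:

    /* With the constify plugin the attribute is recognised and the
     * struct stays writable; without the plugin the macro is empty. */
    #ifdef CONSTIFY_PLUGIN
    #define __no_const __attribute__((no_const))
    #else
    #define __no_const
    #endif
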
39582diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
39583index 3726cd6..b655808 100644
39584--- a/drivers/net/wireless/iwlegacy/3945-mac.c
39585+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
39586@@ -3615,7 +3615,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
39587 */
39588 if (il3945_mod_params.disable_hw_scan) {
39589 D_INFO("Disabling hw_scan\n");
39590- il3945_mac_ops.hw_scan = NULL;
39591+ pax_open_kernel();
39592+ *(void **)&il3945_mac_ops.hw_scan = NULL;
39593+ pax_close_kernel();
39594 }
39595
39596 D_INFO("*** LOAD DRIVER ***\n");
39597diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
39598index 5b9533e..7733880 100644
39599--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
39600+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
39601@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
39602 {
39603 struct iwl_priv *priv = file->private_data;
39604 char buf[64];
39605- int buf_size;
39606+ size_t buf_size;
39607 u32 offset, len;
39608
39609 memset(buf, 0, sizeof(buf));
39610@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
39611 struct iwl_priv *priv = file->private_data;
39612
39613 char buf[8];
39614- int buf_size;
39615+ size_t buf_size;
39616 u32 reset_flag;
39617
39618 memset(buf, 0, sizeof(buf));
39619@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
39620 {
39621 struct iwl_priv *priv = file->private_data;
39622 char buf[8];
39623- int buf_size;
39624+ size_t buf_size;
39625 int ht40;
39626
39627 memset(buf, 0, sizeof(buf));
39628@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
39629 {
39630 struct iwl_priv *priv = file->private_data;
39631 char buf[8];
39632- int buf_size;
39633+ size_t buf_size;
39634 int value;
39635
39636 memset(buf, 0, sizeof(buf));
39637@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
39638 {
39639 struct iwl_priv *priv = file->private_data;
39640 char buf[8];
39641- int buf_size;
39642+ size_t buf_size;
39643 int clear;
39644
39645 memset(buf, 0, sizeof(buf));
39646@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
39647 {
39648 struct iwl_priv *priv = file->private_data;
39649 char buf[8];
39650- int buf_size;
39651+ size_t buf_size;
39652 int trace;
39653
39654 memset(buf, 0, sizeof(buf));
39655@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
39656 {
39657 struct iwl_priv *priv = file->private_data;
39658 char buf[8];
39659- int buf_size;
39660+ size_t buf_size;
39661 int missed;
39662
39663 memset(buf, 0, sizeof(buf));
39664@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
39665
39666 struct iwl_priv *priv = file->private_data;
39667 char buf[8];
39668- int buf_size;
39669+ size_t buf_size;
39670 int plcp;
39671
39672 memset(buf, 0, sizeof(buf));
39673@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
39674
39675 struct iwl_priv *priv = file->private_data;
39676 char buf[8];
39677- int buf_size;
39678+ size_t buf_size;
39679 int flush;
39680
39681 memset(buf, 0, sizeof(buf));
39682@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
39683
39684 struct iwl_priv *priv = file->private_data;
39685 char buf[8];
39686- int buf_size;
39687+ size_t buf_size;
39688 int rts;
39689
39690 if (!priv->cfg->ht_params)
39691@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
39692 {
39693 struct iwl_priv *priv = file->private_data;
39694 char buf[8];
39695- int buf_size;
39696+ size_t buf_size;
39697
39698 memset(buf, 0, sizeof(buf));
39699 buf_size = min(count, sizeof(buf) - 1);
39700@@ -2256,7 +2256,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
39701 struct iwl_priv *priv = file->private_data;
39702 u32 event_log_flag;
39703 char buf[8];
39704- int buf_size;
39705+ size_t buf_size;
39706
39707 /* check that the interface is up */
39708 if (!iwl_is_ready(priv))
39709@@ -2310,7 +2310,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
39710 struct iwl_priv *priv = file->private_data;
39711 char buf[8];
39712 u32 calib_disabled;
39713- int buf_size;
39714+ size_t buf_size;
39715
39716 memset(buf, 0, sizeof(buf));
39717 buf_size = min(count, sizeof(buf) - 1);
39718diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
39719index 35708b9..31f7754 100644
39720--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
39721+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
39722@@ -1100,7 +1100,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
39723 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
39724
39725 char buf[8];
39726- int buf_size;
39727+ size_t buf_size;
39728 u32 reset_flag;
39729
39730 memset(buf, 0, sizeof(buf));
39731@@ -1121,7 +1121,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
39732 {
39733 struct iwl_trans *trans = file->private_data;
39734 char buf[8];
39735- int buf_size;
39736+ size_t buf_size;
39737 int csr;
39738
39739 memset(buf, 0, sizeof(buf));
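
The long run of buf_size changes in these iwlwifi debugfs write handlers is a signedness fix: count arrives as size_t, and storing the size_t result of min(count, sizeof(buf) - 1) in an int truncates on 64-bit and reintroduces signed/unsigned comparisons downstream. Keeping size_t end to end avoids both. The corrected shape, in isolation:

    static ssize_t demo_write(struct file *file, const char __user *ubuf,
                              size_t count, loff_t *ppos)
    {
            char buf[8] = {};
            size_t buf_size = min(count, sizeof(buf) - 1); /* like types */

            if (copy_from_user(buf, ubuf, buf_size))
                    return -EFAULT;
            return count;
    }
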
39740diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
39741index ff90855..e46d223 100644
39742--- a/drivers/net/wireless/mac80211_hwsim.c
39743+++ b/drivers/net/wireless/mac80211_hwsim.c
39744@@ -2062,25 +2062,19 @@ static int __init init_mac80211_hwsim(void)
39745
39746 if (channels > 1) {
39747 hwsim_if_comb.num_different_channels = channels;
39748- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
39749- mac80211_hwsim_ops.cancel_hw_scan =
39750- mac80211_hwsim_cancel_hw_scan;
39751- mac80211_hwsim_ops.sw_scan_start = NULL;
39752- mac80211_hwsim_ops.sw_scan_complete = NULL;
39753- mac80211_hwsim_ops.remain_on_channel =
39754- mac80211_hwsim_roc;
39755- mac80211_hwsim_ops.cancel_remain_on_channel =
39756- mac80211_hwsim_croc;
39757- mac80211_hwsim_ops.add_chanctx =
39758- mac80211_hwsim_add_chanctx;
39759- mac80211_hwsim_ops.remove_chanctx =
39760- mac80211_hwsim_remove_chanctx;
39761- mac80211_hwsim_ops.change_chanctx =
39762- mac80211_hwsim_change_chanctx;
39763- mac80211_hwsim_ops.assign_vif_chanctx =
39764- mac80211_hwsim_assign_vif_chanctx;
39765- mac80211_hwsim_ops.unassign_vif_chanctx =
39766- mac80211_hwsim_unassign_vif_chanctx;
39767+ pax_open_kernel();
39768+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
39769+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
39770+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
39771+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
39772+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
39773+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
39774+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
39775+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
39776+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
39777+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
39778+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
39779+ pax_close_kernel();
39780 }
39781
39782 spin_lock_init(&hwsim_radio_lock);
39783diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
39784index abe1d03..fb02c22 100644
39785--- a/drivers/net/wireless/rndis_wlan.c
39786+++ b/drivers/net/wireless/rndis_wlan.c
39787@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
39788
39789 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
39790
39791- if (rts_threshold < 0 || rts_threshold > 2347)
39792+ if (rts_threshold > 2347)
39793 rts_threshold = 2347;
39794
39795 tmp = cpu_to_le32(rts_threshold);
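
rts_threshold is a u32 here, so the rts_threshold < 0 arm was dead code that newer compilers warn about; only the upper bound survives. The same always-false-comparison cleanup reappears below in drivers/pnp/resource.c for *irq and *dma:

    u32 rts_threshold = get_threshold();  /* hypothetical source */

    if (rts_threshold > 2347)             /* "< 0" can never hold for u32 */
            rts_threshold = 2347;
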
39796diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
39797index 0751b35..246ba3e 100644
39798--- a/drivers/net/wireless/rt2x00/rt2x00.h
39799+++ b/drivers/net/wireless/rt2x00/rt2x00.h
39800@@ -398,7 +398,7 @@ struct rt2x00_intf {
39801 * for hardware which doesn't support hardware
39802 * sequence counting.
39803 */
39804- atomic_t seqno;
39805+ atomic_unchecked_t seqno;
39806 };
39807
39808 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
39809diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
39810index e488b94..14b6a0c 100644
39811--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
39812+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
39813@@ -240,9 +240,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
39814 * sequence counter given by mac80211.
39815 */
39816 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
39817- seqno = atomic_add_return(0x10, &intf->seqno);
39818+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
39819 else
39820- seqno = atomic_read(&intf->seqno);
39821+ seqno = atomic_read_unchecked(&intf->seqno);
39822
39823 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
39824 hdr->seq_ctrl |= cpu_to_le16(seqno);
39825diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
39826index e57ee48..541cf6c 100644
39827--- a/drivers/net/wireless/ti/wl1251/sdio.c
39828+++ b/drivers/net/wireless/ti/wl1251/sdio.c
39829@@ -269,13 +269,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
39830
39831 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
39832
39833- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
39834- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
39835+ pax_open_kernel();
39836+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
39837+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
39838+ pax_close_kernel();
39839
39840 wl1251_info("using dedicated interrupt line");
39841 } else {
39842- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
39843- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
39844+ pax_open_kernel();
39845+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
39846+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
39847+ pax_close_kernel();
39848
39849 wl1251_info("using SDIO interrupt");
39850 }
39851diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
39852index e5f5f8f..fdf15b7 100644
39853--- a/drivers/net/wireless/ti/wl12xx/main.c
39854+++ b/drivers/net/wireless/ti/wl12xx/main.c
39855@@ -644,7 +644,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
39856 sizeof(wl->conf.mem));
39857
39858 /* read data preparation is only needed by wl127x */
39859- wl->ops->prepare_read = wl127x_prepare_read;
39860+ pax_open_kernel();
39861+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
39862+ pax_close_kernel();
39863
39864 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
39865 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
39866@@ -665,7 +667,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
39867 sizeof(wl->conf.mem));
39868
39869 /* read data preparation is only needed by wl127x */
39870- wl->ops->prepare_read = wl127x_prepare_read;
39871+ pax_open_kernel();
39872+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
39873+ pax_close_kernel();
39874
39875 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER, WL127X_IFTYPE_VER,
39876 WL127X_MAJOR_VER, WL127X_SUBTYPE_VER,
39877diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
39878index 8d8c1f8..e754844 100644
39879--- a/drivers/net/wireless/ti/wl18xx/main.c
39880+++ b/drivers/net/wireless/ti/wl18xx/main.c
39881@@ -1489,8 +1489,10 @@ static int wl18xx_setup(struct wl1271 *wl)
39882 }
39883
39884 if (!checksum_param) {
39885- wl18xx_ops.set_rx_csum = NULL;
39886- wl18xx_ops.init_vif = NULL;
39887+ pax_open_kernel();
39888+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
39889+ *(void **)&wl18xx_ops.init_vif = NULL;
39890+ pax_close_kernel();
39891 }
39892
39893 /* Enable 11a Band only if we have 5G antennas */
39894diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
39895index d93b2b6..ae50401 100644
39896--- a/drivers/oprofile/buffer_sync.c
39897+++ b/drivers/oprofile/buffer_sync.c
39898@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
39899 if (cookie == NO_COOKIE)
39900 offset = pc;
39901 if (cookie == INVALID_COOKIE) {
39902- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39903+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39904 offset = pc;
39905 }
39906 if (cookie != last_cookie) {
39907@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
39908 /* add userspace sample */
39909
39910 if (!mm) {
39911- atomic_inc(&oprofile_stats.sample_lost_no_mm);
39912+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
39913 return 0;
39914 }
39915
39916 cookie = lookup_dcookie(mm, s->eip, &offset);
39917
39918 if (cookie == INVALID_COOKIE) {
39919- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39920+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39921 return 0;
39922 }
39923
39924@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
39925 /* ignore backtraces if failed to add a sample */
39926 if (state == sb_bt_start) {
39927 state = sb_bt_ignore;
39928- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
39929+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
39930 }
39931 }
39932 release_mm(mm);
39933diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
39934index c0cc4e7..44d4e54 100644
39935--- a/drivers/oprofile/event_buffer.c
39936+++ b/drivers/oprofile/event_buffer.c
39937@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
39938 }
39939
39940 if (buffer_pos == buffer_size) {
39941- atomic_inc(&oprofile_stats.event_lost_overflow);
39942+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
39943 return;
39944 }
39945
39946diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
39947index ed2c3ec..deda85a 100644
39948--- a/drivers/oprofile/oprof.c
39949+++ b/drivers/oprofile/oprof.c
39950@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
39951 if (oprofile_ops.switch_events())
39952 return;
39953
39954- atomic_inc(&oprofile_stats.multiplex_counter);
39955+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
39956 start_switch_worker();
39957 }
39958
39959diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
39960index 917d28e..d62d981 100644
39961--- a/drivers/oprofile/oprofile_stats.c
39962+++ b/drivers/oprofile/oprofile_stats.c
39963@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
39964 cpu_buf->sample_invalid_eip = 0;
39965 }
39966
39967- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
39968- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
39969- atomic_set(&oprofile_stats.event_lost_overflow, 0);
39970- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
39971- atomic_set(&oprofile_stats.multiplex_counter, 0);
39972+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
39973+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
39974+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
39975+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
39976+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
39977 }
39978
39979
39980diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
39981index 38b6fc0..b5cbfce 100644
39982--- a/drivers/oprofile/oprofile_stats.h
39983+++ b/drivers/oprofile/oprofile_stats.h
39984@@ -13,11 +13,11 @@
39985 #include <linux/atomic.h>
39986
39987 struct oprofile_stat_struct {
39988- atomic_t sample_lost_no_mm;
39989- atomic_t sample_lost_no_mapping;
39990- atomic_t bt_lost_no_mapping;
39991- atomic_t event_lost_overflow;
39992- atomic_t multiplex_counter;
39993+ atomic_unchecked_t sample_lost_no_mm;
39994+ atomic_unchecked_t sample_lost_no_mapping;
39995+ atomic_unchecked_t bt_lost_no_mapping;
39996+ atomic_unchecked_t event_lost_overflow;
39997+ atomic_unchecked_t multiplex_counter;
39998 };
39999
40000 extern struct oprofile_stat_struct oprofile_stats;
40001diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
40002index 849357c..b83c1e0 100644
40003--- a/drivers/oprofile/oprofilefs.c
40004+++ b/drivers/oprofile/oprofilefs.c
40005@@ -185,7 +185,7 @@ static const struct file_operations atomic_ro_fops = {
40006
40007
40008 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
40009- char const *name, atomic_t *val)
40010+ char const *name, atomic_unchecked_t *val)
40011 {
40012 return __oprofilefs_create_file(sb, root, name,
40013 &atomic_ro_fops, 0444, val);
40014diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
40015index 93404f7..4a313d8 100644
40016--- a/drivers/oprofile/timer_int.c
40017+++ b/drivers/oprofile/timer_int.c
40018@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
40019 return NOTIFY_OK;
40020 }
40021
40022-static struct notifier_block __refdata oprofile_cpu_notifier = {
40023+static struct notifier_block oprofile_cpu_notifier = {
40024 .notifier_call = oprofile_cpu_notify,
40025 };
40026
40027diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
40028index 3f56bc0..707d642 100644
40029--- a/drivers/parport/procfs.c
40030+++ b/drivers/parport/procfs.c
40031@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
40032
40033 *ppos += len;
40034
40035- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
40036+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
40037 }
40038
40039 #ifdef CONFIG_PARPORT_1284
40040@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
40041
40042 *ppos += len;
40043
40044- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
40045+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
40046 }
40047 #endif /* IEEE1284.3 support. */
40048
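
Both parport procfs read handlers gain a defensive bound: if a miscomputed len ever exceeded the on-stack buffer, the read now fails with -EFAULT instead of copying adjacent stack out to userspace. The shape of the guard:

    char buffer[256];  /* hypothetical size; matches the handler's buffer */
    int len = snprintf(buffer, sizeof(buffer), "%d\n", 42);

    if (len > sizeof(buffer) || copy_to_user(result, buffer, len))
            return -EFAULT;
    return 0;
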
40049diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
40050index c35e8ad..fc33beb 100644
40051--- a/drivers/pci/hotplug/acpiphp_ibm.c
40052+++ b/drivers/pci/hotplug/acpiphp_ibm.c
40053@@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
40054 goto init_cleanup;
40055 }
40056
40057- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
40058+ pax_open_kernel();
40059+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
40060+ pax_close_kernel();
40061 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
40062
40063 return retval;
40064diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
40065index a6a71c4..c91097b 100644
40066--- a/drivers/pci/hotplug/cpcihp_generic.c
40067+++ b/drivers/pci/hotplug/cpcihp_generic.c
40068@@ -73,7 +73,6 @@ static u16 port;
40069 static unsigned int enum_bit;
40070 static u8 enum_mask;
40071
40072-static struct cpci_hp_controller_ops generic_hpc_ops;
40073 static struct cpci_hp_controller generic_hpc;
40074
40075 static int __init validate_parameters(void)
40076@@ -139,6 +138,10 @@ static int query_enum(void)
40077 return ((value & enum_mask) == enum_mask);
40078 }
40079
40080+static struct cpci_hp_controller_ops generic_hpc_ops = {
40081+ .query_enum = query_enum,
40082+};
40083+
40084 static int __init cpcihp_generic_init(void)
40085 {
40086 int status;
40087@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
40088 pci_dev_put(dev);
40089
40090 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
40091- generic_hpc_ops.query_enum = query_enum;
40092 generic_hpc.ops = &generic_hpc_ops;
40093
40094 status = cpci_hp_register_controller(&generic_hpc);
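
cpcihp_generic (and zt5550 just below) move the unconditional query_enum hook into a designated initializer, so the ops structure can be constified and only the genuinely conditional hooks need a pax_open_kernel() window at probe time:

    /* Fixed hooks bound at build time... */
    static struct cpci_hp_controller_ops generic_hpc_ops = {
            .query_enum = query_enum,
    };
    /* ...conditional ones (IRQ hooks) patched in through the window. */
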
40095diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
40096index 449b4bb..257e2e8 100644
40097--- a/drivers/pci/hotplug/cpcihp_zt5550.c
40098+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
40099@@ -59,7 +59,6 @@
40100 /* local variables */
40101 static bool debug;
40102 static bool poll;
40103-static struct cpci_hp_controller_ops zt5550_hpc_ops;
40104 static struct cpci_hp_controller zt5550_hpc;
40105
40106 /* Primary cPCI bus bridge device */
40107@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
40108 return 0;
40109 }
40110
40111+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
40112+ .query_enum = zt5550_hc_query_enum,
40113+};
40114+
40115 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
40116 {
40117 int status;
40118@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
40119 dbg("returned from zt5550_hc_config");
40120
40121 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
40122- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
40123 zt5550_hpc.ops = &zt5550_hpc_ops;
40124 if(!poll) {
40125 zt5550_hpc.irq = hc_dev->irq;
40126 zt5550_hpc.irq_flags = IRQF_SHARED;
40127 zt5550_hpc.dev_id = hc_dev;
40128
40129- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
40130- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
40131- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
40132+ pax_open_kernel();
40133+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
40134+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
40135+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
39136+	pax_close_kernel();
40137 } else {
40138 info("using ENUM# polling mode");
40139 }
40140diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
40141index 76ba8a1..20ca857 100644
40142--- a/drivers/pci/hotplug/cpqphp_nvram.c
40143+++ b/drivers/pci/hotplug/cpqphp_nvram.c
40144@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
40145
40146 void compaq_nvram_init (void __iomem *rom_start)
40147 {
40148+
40149+#ifndef CONFIG_PAX_KERNEXEC
40150 if (rom_start) {
40151 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
40152 }
40153+#endif
40154+
40155 dbg("int15 entry = %p\n", compaq_int15_entry_point);
40156
40157 /* initialize our int15 lock */
40158diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
40159index 202f4a9..8ee47d0 100644
40160--- a/drivers/pci/hotplug/pci_hotplug_core.c
40161+++ b/drivers/pci/hotplug/pci_hotplug_core.c
40162@@ -448,8 +448,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
40163 return -EINVAL;
40164 }
40165
40166- slot->ops->owner = owner;
40167- slot->ops->mod_name = mod_name;
40168+ pax_open_kernel();
40169+ *(struct module **)&slot->ops->owner = owner;
40170+ *(const char **)&slot->ops->mod_name = mod_name;
40171+ pax_close_kernel();
40172
40173 mutex_lock(&pci_hp_mutex);
40174 /*
40175diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
40176index 939bd1d..a1459c9 100644
40177--- a/drivers/pci/hotplug/pciehp_core.c
40178+++ b/drivers/pci/hotplug/pciehp_core.c
40179@@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
40180 struct slot *slot = ctrl->slot;
40181 struct hotplug_slot *hotplug = NULL;
40182 struct hotplug_slot_info *info = NULL;
40183- struct hotplug_slot_ops *ops = NULL;
40184+ hotplug_slot_ops_no_const *ops = NULL;
40185 char name[SLOT_NAME_SIZE];
40186 int retval = -ENOMEM;
40187
40188diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
40189index 9c6e9bb..2916736 100644
40190--- a/drivers/pci/pci-sysfs.c
40191+++ b/drivers/pci/pci-sysfs.c
40192@@ -1071,7 +1071,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
40193 {
40194 /* allocate attribute structure, piggyback attribute name */
40195 int name_len = write_combine ? 13 : 10;
40196- struct bin_attribute *res_attr;
40197+ bin_attribute_no_const *res_attr;
40198 int retval;
40199
40200 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
40201@@ -1256,7 +1256,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
40202 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
40203 {
40204 int retval;
40205- struct bin_attribute *attr;
40206+ bin_attribute_no_const *attr;
40207
40208 /* If the device has VPD, try to expose it in sysfs. */
40209 if (dev->vpd) {
40210@@ -1303,7 +1303,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
40211 {
40212 int retval;
40213 int rom_size = 0;
40214- struct bin_attribute *attr;
40215+ bin_attribute_no_const *attr;
40216
40217 if (!sysfs_initialized)
40218 return -EACCES;
40219diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
40220index e851829..a1a7196 100644
40221--- a/drivers/pci/pci.h
40222+++ b/drivers/pci/pci.h
40223@@ -98,7 +98,7 @@ struct pci_vpd_ops {
40224 struct pci_vpd {
40225 unsigned int len;
40226 const struct pci_vpd_ops *ops;
40227- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
40228+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
40229 };
40230
40231 extern int pci_vpd_pci22_init(struct pci_dev *dev);
40232diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
40233index 8474b6a..ee81993 100644
40234--- a/drivers/pci/pcie/aspm.c
40235+++ b/drivers/pci/pcie/aspm.c
40236@@ -27,9 +27,9 @@
40237 #define MODULE_PARAM_PREFIX "pcie_aspm."
40238
40239 /* Note: those are not register definitions */
40240-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
40241-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
40242-#define ASPM_STATE_L1 (4) /* L1 state */
40243+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
40244+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
40245+#define ASPM_STATE_L1 (4U) /* L1 state */
40246 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
40247 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
40248
40249diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
40250index 6186f03..1a78714 100644
40251--- a/drivers/pci/probe.c
40252+++ b/drivers/pci/probe.c
40253@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
40254 struct pci_bus_region region;
40255 bool bar_too_big = false, bar_disabled = false;
40256
40257- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
40258+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
40259
40260 /* No printks while decoding is disabled! */
40261 if (!dev->mmio_always_on) {
40262diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
40263index 9b8505c..f00870a 100644
40264--- a/drivers/pci/proc.c
40265+++ b/drivers/pci/proc.c
40266@@ -465,7 +465,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
40267 static int __init pci_proc_init(void)
40268 {
40269 struct pci_dev *dev = NULL;
40270+
40271+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40272+#ifdef CONFIG_GRKERNSEC_PROC_USER
40273+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
40274+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40275+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40276+#endif
40277+#else
40278 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
40279+#endif
40280 proc_create("devices", 0, proc_bus_pci_dir,
40281 &proc_bus_pci_dev_operations);
40282 proc_initialized = 1;
40283diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
40284index 2111dbb..79e434b 100644
40285--- a/drivers/platform/x86/msi-laptop.c
40286+++ b/drivers/platform/x86/msi-laptop.c
40287@@ -820,12 +820,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
40288 int result;
40289
40290 /* allow userland write sysfs file */
40291- dev_attr_bluetooth.store = store_bluetooth;
40292- dev_attr_wlan.store = store_wlan;
40293- dev_attr_threeg.store = store_threeg;
40294- dev_attr_bluetooth.attr.mode |= S_IWUSR;
40295- dev_attr_wlan.attr.mode |= S_IWUSR;
40296- dev_attr_threeg.attr.mode |= S_IWUSR;
40297+ pax_open_kernel();
40298+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
40299+ *(void **)&dev_attr_wlan.store = store_wlan;
40300+ *(void **)&dev_attr_threeg.store = store_threeg;
40301+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
40302+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
40303+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
40304+ pax_close_kernel();
40305
40306 /* disable hardware control by fn key */
40307 result = ec_read(MSI_STANDARD_EC_SCM_LOAD_ADDRESS, &data);
40308diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
40309index b8ad71f..3ec9bb4 100644
40310--- a/drivers/platform/x86/sony-laptop.c
40311+++ b/drivers/platform/x86/sony-laptop.c
40312@@ -2356,7 +2356,7 @@ static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
40313 }
40314
40315 /* High speed charging function */
40316-static struct device_attribute *hsc_handle;
40317+static device_attribute_no_const *hsc_handle;
40318
40319 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
40320 struct device_attribute *attr,
40321diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
40322index f946ca7..f25c833 100644
40323--- a/drivers/platform/x86/thinkpad_acpi.c
40324+++ b/drivers/platform/x86/thinkpad_acpi.c
40325@@ -2097,7 +2097,7 @@ static int hotkey_mask_get(void)
40326 return 0;
40327 }
40328
40329-void static hotkey_mask_warn_incomplete_mask(void)
40330+static void hotkey_mask_warn_incomplete_mask(void)
40331 {
40332 /* log only what the user can fix... */
40333 const u32 wantedmask = hotkey_driver_mask &
40334@@ -2328,11 +2328,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
40335 }
40336 }
40337
40338-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40339- struct tp_nvram_state *newn,
40340- const u32 event_mask)
40341-{
40342-
40343 #define TPACPI_COMPARE_KEY(__scancode, __member) \
40344 do { \
40345 if ((event_mask & (1 << __scancode)) && \
40346@@ -2346,36 +2341,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40347 tpacpi_hotkey_send_key(__scancode); \
40348 } while (0)
40349
40350- void issue_volchange(const unsigned int oldvol,
40351- const unsigned int newvol)
40352- {
40353- unsigned int i = oldvol;
40354+static void issue_volchange(const unsigned int oldvol,
40355+ const unsigned int newvol,
40356+ const u32 event_mask)
40357+{
40358+ unsigned int i = oldvol;
40359
40360- while (i > newvol) {
40361- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
40362- i--;
40363- }
40364- while (i < newvol) {
40365- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
40366- i++;
40367- }
40368+ while (i > newvol) {
40369+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
40370+ i--;
40371 }
40372+ while (i < newvol) {
40373+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
40374+ i++;
40375+ }
40376+}
40377
40378- void issue_brightnesschange(const unsigned int oldbrt,
40379- const unsigned int newbrt)
40380- {
40381- unsigned int i = oldbrt;
40382+static void issue_brightnesschange(const unsigned int oldbrt,
40383+ const unsigned int newbrt,
40384+ const u32 event_mask)
40385+{
40386+ unsigned int i = oldbrt;
40387
40388- while (i > newbrt) {
40389- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
40390- i--;
40391- }
40392- while (i < newbrt) {
40393- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
40394- i++;
40395- }
40396+ while (i > newbrt) {
40397+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
40398+ i--;
40399+ }
40400+ while (i < newbrt) {
40401+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
40402+ i++;
40403 }
40404+}
40405
40406+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40407+ struct tp_nvram_state *newn,
40408+ const u32 event_mask)
40409+{
40410 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
40411 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
40412 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
40413@@ -2409,7 +2410,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40414 oldn->volume_level != newn->volume_level) {
40415 /* recently muted, or repeated mute keypress, or
40416 * multiple presses ending in mute */
40417- issue_volchange(oldn->volume_level, newn->volume_level);
40418+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
40419 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
40420 }
40421 } else {
40422@@ -2419,7 +2420,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40423 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
40424 }
40425 if (oldn->volume_level != newn->volume_level) {
40426- issue_volchange(oldn->volume_level, newn->volume_level);
40427+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
40428 } else if (oldn->volume_toggle != newn->volume_toggle) {
40429 /* repeated vol up/down keypress at end of scale ? */
40430 if (newn->volume_level == 0)
40431@@ -2432,7 +2433,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40432 /* handle brightness */
40433 if (oldn->brightness_level != newn->brightness_level) {
40434 issue_brightnesschange(oldn->brightness_level,
40435- newn->brightness_level);
40436+ newn->brightness_level,
40437+ event_mask);
40438 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
40439 /* repeated key presses that didn't change state */
40440 if (newn->brightness_level == 0)
40441@@ -2441,10 +2443,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
40442 && !tp_features.bright_unkfw)
40443 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
40444 }
40445+}
40446
40447 #undef TPACPI_COMPARE_KEY
40448 #undef TPACPI_MAY_SEND_KEY
40449-}
40450
40451 /*
40452 * Polling driver
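
The large thinkpad_acpi rework hoists the GCC nested functions issue_volchange() and issue_brightnesschange() out of hotkey_compare_and_issue_event() into file-scope statics, with the captured event_mask made an explicit parameter. Nested functions can require executable-stack trampolines when their address escapes, and the PaX toolchain plugins do not cope with them; plain statics are a drop-in replacement. Schematically:

    /* send_key() is a hypothetical stand-in for TPACPI_MAY_SEND_KEY. */
    static void issue_volchange(const unsigned int oldvol,
                                const unsigned int newvol,
                                const u32 event_mask)
    {
            unsigned int i = oldvol;

            while (i > newvol) { send_key(VOLUMEDOWN, event_mask); i--; }
            while (i < newvol) { send_key(VOLUMEUP,   event_mask); i++; }
    }
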
40453diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40454index 769d265..a3a05ca 100644
40455--- a/drivers/pnp/pnpbios/bioscalls.c
40456+++ b/drivers/pnp/pnpbios/bioscalls.c
40457@@ -58,7 +58,7 @@ do { \
40458 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40459 } while(0)
40460
40461-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
40462+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
40463 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
40464
40465 /*
40466@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40467
40468 cpu = get_cpu();
40469 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
40470+
40471+ pax_open_kernel();
40472 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
40473+ pax_close_kernel();
40474
40475 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
40476 spin_lock_irqsave(&pnp_bios_lock, flags);
40477@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40478 :"memory");
40479 spin_unlock_irqrestore(&pnp_bios_lock, flags);
40480
40481+ pax_open_kernel();
40482 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
40483+ pax_close_kernel();
40484+
40485 put_cpu();
40486
40487 /* If we get here and this is set then the PnP BIOS faulted on us. */
40488@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
40489 return status;
40490 }
40491
40492-void pnpbios_calls_init(union pnp_bios_install_struct *header)
40493+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
40494 {
40495 int i;
40496
40497@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40498 pnp_bios_callpoint.offset = header->fields.pm16offset;
40499 pnp_bios_callpoint.segment = PNP_CS16;
40500
40501+ pax_open_kernel();
40502+
40503 for_each_possible_cpu(i) {
40504 struct desc_struct *gdt = get_cpu_gdt_table(i);
40505 if (!gdt)
40506@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40507 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
40508 (unsigned long)__va(header->fields.pm16dseg));
40509 }
40510+
40511+ pax_close_kernel();
40512 }
40513diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
40514index 3e6db1c..1fbbdae 100644
40515--- a/drivers/pnp/resource.c
40516+++ b/drivers/pnp/resource.c
40517@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
40518 return 1;
40519
40520 /* check if the resource is valid */
40521- if (*irq < 0 || *irq > 15)
40522+ if (*irq > 15)
40523 return 0;
40524
40525 /* check if the resource is reserved */
40526@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
40527 return 1;
40528
40529 /* check if the resource is valid */
40530- if (*dma < 0 || *dma == 4 || *dma > 7)
40531+ if (*dma == 4 || *dma > 7)
40532 return 0;
40533
40534 /* check if the resource is reserved */
40535diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
40536index 7df7c5f..bd48c47 100644
40537--- a/drivers/power/pda_power.c
40538+++ b/drivers/power/pda_power.c
40539@@ -37,7 +37,11 @@ static int polling;
40540
40541 #ifdef CONFIG_USB_OTG_UTILS
40542 static struct usb_phy *transceiver;
40543-static struct notifier_block otg_nb;
40544+static int otg_handle_notification(struct notifier_block *nb,
40545+ unsigned long event, void *unused);
40546+static struct notifier_block otg_nb = {
40547+ .notifier_call = otg_handle_notification
40548+};
40549 #endif
40550
40551 static struct regulator *ac_draw;
40552@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
40553
40554 #ifdef CONFIG_USB_OTG_UTILS
40555 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
40556- otg_nb.notifier_call = otg_handle_notification;
40557 ret = usb_register_notifier(transceiver, &otg_nb);
40558 if (ret) {
40559 dev_err(dev, "failure to register otg notifier\n");
40560diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
40561index cc439fd..8fa30df 100644
40562--- a/drivers/power/power_supply.h
40563+++ b/drivers/power/power_supply.h
40564@@ -16,12 +16,12 @@ struct power_supply;
40565
40566 #ifdef CONFIG_SYSFS
40567
40568-extern void power_supply_init_attrs(struct device_type *dev_type);
40569+extern void power_supply_init_attrs(void);
40570 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
40571
40572 #else
40573
40574-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
40575+static inline void power_supply_init_attrs(void) {}
40576 #define power_supply_uevent NULL
40577
40578 #endif /* CONFIG_SYSFS */
40579diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
40580index 8a7cfb3..493e0a2 100644
40581--- a/drivers/power/power_supply_core.c
40582+++ b/drivers/power/power_supply_core.c
40583@@ -24,7 +24,10 @@
40584 struct class *power_supply_class;
40585 EXPORT_SYMBOL_GPL(power_supply_class);
40586
40587-static struct device_type power_supply_dev_type;
40588+extern const struct attribute_group *power_supply_attr_groups[];
40589+static struct device_type power_supply_dev_type = {
40590+ .groups = power_supply_attr_groups,
40591+};
40592
40593 static int __power_supply_changed_work(struct device *dev, void *data)
40594 {
40595@@ -393,7 +396,6 @@ static int __init power_supply_class_init(void)
40596 return PTR_ERR(power_supply_class);
40597
40598 power_supply_class->dev_uevent = power_supply_uevent;
40599- power_supply_init_attrs(&power_supply_dev_type);
40600
40601 return 0;
40602 }
40603diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
40604index 40fa3b7..d9c2e0e 100644
40605--- a/drivers/power/power_supply_sysfs.c
40606+++ b/drivers/power/power_supply_sysfs.c
40607@@ -229,17 +229,15 @@ static struct attribute_group power_supply_attr_group = {
40608 .is_visible = power_supply_attr_is_visible,
40609 };
40610
40611-static const struct attribute_group *power_supply_attr_groups[] = {
40612+const struct attribute_group *power_supply_attr_groups[] = {
40613 &power_supply_attr_group,
40614 NULL,
40615 };
40616
40617-void power_supply_init_attrs(struct device_type *dev_type)
40618+void power_supply_init_attrs(void)
40619 {
40620 int i;
40621
40622- dev_type->groups = power_supply_attr_groups;
40623-
40624 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
40625 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
40626 }
40627diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
40628index 4d7c635..9860196 100644
40629--- a/drivers/regulator/max8660.c
40630+++ b/drivers/regulator/max8660.c
40631@@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
40632 max8660->shadow_regs[MAX8660_OVER1] = 5;
40633 } else {
40634 /* Otherwise devices can be toggled via software */
40635- max8660_dcdc_ops.enable = max8660_dcdc_enable;
40636- max8660_dcdc_ops.disable = max8660_dcdc_disable;
40637+ pax_open_kernel();
40638+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
40639+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
40640+ pax_close_kernel();
40641 }
40642
40643 /*
40644diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
40645index 9a8ea91..c483dd9 100644
40646--- a/drivers/regulator/max8973-regulator.c
40647+++ b/drivers/regulator/max8973-regulator.c
40648@@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
40649 if (!pdata->enable_ext_control) {
40650 max->desc.enable_reg = MAX8973_VOUT;
40651 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
40652- max8973_dcdc_ops.enable = regulator_enable_regmap;
40653- max8973_dcdc_ops.disable = regulator_disable_regmap;
40654- max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
40655+ pax_open_kernel();
40656+ *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
40657+ *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
40658+ *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
40659+ pax_close_kernel();
40660 }
40661
40662 max->enable_external_control = pdata->enable_ext_control;
40663diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
40664index 0d84b1f..c2da6ac 100644
40665--- a/drivers/regulator/mc13892-regulator.c
40666+++ b/drivers/regulator/mc13892-regulator.c
40667@@ -540,10 +540,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
40668 }
40669 mc13xxx_unlock(mc13892);
40670
40671- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
40672+ pax_open_kernel();
40673+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
40674 = mc13892_vcam_set_mode;
40675- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
40676+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
40677 = mc13892_vcam_get_mode;
40678+ pax_close_kernel();
40679
40680 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
40681 ARRAY_SIZE(mc13892_regulators));
40682diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
40683index 16630aa..6afc992 100644
40684--- a/drivers/rtc/rtc-cmos.c
40685+++ b/drivers/rtc/rtc-cmos.c
40686@@ -724,7 +724,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
40687 hpet_rtc_timer_init();
40688
40689 /* export at least the first block of NVRAM */
40690- nvram.size = address_space - NVRAM_OFFSET;
40691+ pax_open_kernel();
40692+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
40693+ pax_close_kernel();
40694 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
40695 if (retval < 0) {
40696 dev_dbg(dev, "can't create nvram file? %d\n", retval);
40697diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
40698index 9a86b4b..3a383dc 100644
40699--- a/drivers/rtc/rtc-dev.c
40700+++ b/drivers/rtc/rtc-dev.c
40701@@ -14,6 +14,7 @@
40702 #include <linux/module.h>
40703 #include <linux/rtc.h>
40704 #include <linux/sched.h>
40705+#include <linux/grsecurity.h>
40706 #include "rtc-core.h"
40707
40708 static dev_t rtc_devt;
40709@@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *file,
40710 if (copy_from_user(&tm, uarg, sizeof(tm)))
40711 return -EFAULT;
40712
40713+ gr_log_timechange();
40714+
40715 return rtc_set_time(rtc, &tm);
40716
40717 case RTC_PIE_ON:
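
rtc-dev gains a grsecurity audit hook: once the RTC_SET_TIME payload has been copied in and validated, gr_log_timechange() records the event before the clock actually changes. A trivial sketch of that ordering; the real gr_log_timechange() lives in grsecurity's logging core and is stubbed here:

#include <stdio.h>

struct rtc_time { int tm_hour, tm_min, tm_sec; };

static void gr_log_timechange(void) { puts("audit: time change"); }

static int rtc_set_time(const struct rtc_time *tm)
{
	printf("set %02d:%02d:%02d\n", tm->tm_hour, tm->tm_min, tm->tm_sec);
	return 0;
}

static int set_time_ioctl(const struct rtc_time *tm)
{
	gr_log_timechange();	/* log before the change takes effect */
	return rtc_set_time(tm);
}

int main(void)
{
	struct rtc_time tm = { 12, 34, 56 };
	return set_time_ioctl(&tm);
}
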
40718diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
40719index e0d0ba4..3c65868 100644
40720--- a/drivers/rtc/rtc-ds1307.c
40721+++ b/drivers/rtc/rtc-ds1307.c
40722@@ -106,7 +106,7 @@ struct ds1307 {
40723 u8 offset; /* register's offset */
40724 u8 regs[11];
40725 u16 nvram_offset;
40726- struct bin_attribute *nvram;
40727+ bin_attribute_no_const *nvram;
40728 enum ds_type type;
40729 unsigned long flags;
40730 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
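
ds1307 sizes its nvram attribute at probe time, so the member is retyped with the *_no_const variant; the patched headers appear to define these typedefs as the same struct minus the plugin-enforced const, giving one writable escape hatch while every other bin_attribute stays read-only. A sketch of the idiom, with a no-op __no_const standing in for the plugin attribute:

#define __no_const	/* consumed by the PaX constify GCC plugin */

struct bin_attribute { const char *name; unsigned long size; };
typedef struct bin_attribute __no_const bin_attribute_no_const;

static bin_attribute_no_const nvram;	/* deliberately writable instance */

int main(void)
{
	nvram.size = 56;	/* sized at "probe" time, no pax_open_kernel() */
	return (int)nvram.size - 56;
}
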
40731diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
40732index 130f29a..6179d03 100644
40733--- a/drivers/rtc/rtc-m48t59.c
40734+++ b/drivers/rtc/rtc-m48t59.c
40735@@ -482,7 +482,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
40736 goto out;
40737 }
40738
40739- m48t59_nvram_attr.size = pdata->offset;
40740+ pax_open_kernel();
40741+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
40742+ pax_close_kernel();
40743
40744 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
40745 if (ret) {
40746diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
40747index e693af6..2e525b6 100644
40748--- a/drivers/scsi/bfa/bfa_fcpim.h
40749+++ b/drivers/scsi/bfa/bfa_fcpim.h
40750@@ -36,7 +36,7 @@ struct bfa_iotag_s {
40751
40752 struct bfa_itn_s {
40753 bfa_isr_func_t isr;
40754-};
40755+} __no_const;
40756
40757 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
40758 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
40759diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
40760index 23a90e7..9cf04ee 100644
40761--- a/drivers/scsi/bfa/bfa_ioc.h
40762+++ b/drivers/scsi/bfa/bfa_ioc.h
40763@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
40764 bfa_ioc_disable_cbfn_t disable_cbfn;
40765 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
40766 bfa_ioc_reset_cbfn_t reset_cbfn;
40767-};
40768+} __no_const;
40769
40770 /*
40771 * IOC event notification mechanism.
40772@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
40773 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
40774 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
40775 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
40776-};
40777+} __no_const;
40778
40779 /*
40780 * Queue element to wait for room in request queue. FIFO order is
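
Structures consisting purely of function pointers, like bfa_itn_s and bfa_ioc_cbfn_s above, would be implicitly constified by the PaX plugin; tagging the declaration __no_const opts the type out, because the bfa driver genuinely fills these callbacks in at runtime. A sketch, again with __no_const reduced to a no-op:

#define __no_const	/* plugin attribute; no-op outside a PaX build */

struct bfa;		/* opaque */

struct bfa_itn {
	void (*isr)(struct bfa *bfa);
} __no_const;

static void my_isr(struct bfa *bfa) { (void)bfa; }

int main(void)
{
	struct bfa_itn itn;
	itn.isr = my_isr;	/* legal precisely because of __no_const */
	return 0;
}
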
40781diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
40782index 593085a..47aa999 100644
40783--- a/drivers/scsi/hosts.c
40784+++ b/drivers/scsi/hosts.c
40785@@ -42,7 +42,7 @@
40786 #include "scsi_logging.h"
40787
40788
40789-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
40790+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
40791
40792
40793 static void scsi_host_cls_release(struct device *dev)
40794@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
40795 * subtract one because we increment first then return, but we need to
40796 * know what the next host number was before increment
40797 */
40798- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
40799+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
40800 shost->dma_channel = 0xff;
40801
40802 /* These three are default values which can be overridden */
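
scsi_host_next_hn is an allocator whose wrap-around is harmless, so under PaX REFCOUNT (which traps when a checked atomic_t overflows) it becomes atomic_unchecked_t with *_unchecked accessors; the hardening remains in force for true reference counts. A userspace sketch of the checked/unchecked split, with GCC __atomic builtins standing in for the architecture-specific overflow traps:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

static int atomic_inc_return(atomic_t *v)
{
	int old = __atomic_fetch_add(&v->counter, 1, __ATOMIC_SEQ_CST);
	if (old == INT_MAX) {		/* overflow: treat as refcount bug */
		fprintf(stderr, "refcount overflow\n");
		abort();
	}
	return old + 1;
}

static int atomic_inc_return_unchecked(atomic_unchecked_t *v)
{
	/* wraps silently; fine for ids, sequence numbers, statistics */
	return __atomic_add_fetch(&v->counter, 1, __ATOMIC_SEQ_CST);
}

int main(void)
{
	atomic_t ref = { 0 };
	atomic_unchecked_t host_no = { INT_MAX };	/* about to wrap */
	printf("ref=%d host_no=%d\n", atomic_inc_return(&ref),
	       atomic_inc_return_unchecked(&host_no));
	return 0;
}

The bulk conversions in libfc, lpfc, pmcraid, qla4xxx, usbip and the scsi transports below all follow this rule: wrapping counters move to the unchecked type, everything else keeps the trap.
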
40803diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
40804index 4f33806..afd6f60 100644
40805--- a/drivers/scsi/hpsa.c
40806+++ b/drivers/scsi/hpsa.c
40807@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
40808 unsigned long flags;
40809
40810 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
40811- return h->access.command_completed(h, q);
40812+ return h->access->command_completed(h, q);
40813
40814 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
40815 a = rq->head[rq->current_entry];
40816@@ -3374,7 +3374,7 @@ static void start_io(struct ctlr_info *h)
40817 while (!list_empty(&h->reqQ)) {
40818 c = list_entry(h->reqQ.next, struct CommandList, list);
40819 /* can't do anything if fifo is full */
40820- if ((h->access.fifo_full(h))) {
40821+ if ((h->access->fifo_full(h))) {
40822 dev_warn(&h->pdev->dev, "fifo full\n");
40823 break;
40824 }
40825@@ -3396,7 +3396,7 @@ static void start_io(struct ctlr_info *h)
40826
40827 /* Tell the controller execute command */
40828 spin_unlock_irqrestore(&h->lock, flags);
40829- h->access.submit_command(h, c);
40830+ h->access->submit_command(h, c);
40831 spin_lock_irqsave(&h->lock, flags);
40832 }
40833 spin_unlock_irqrestore(&h->lock, flags);
40834@@ -3404,17 +3404,17 @@ static void start_io(struct ctlr_info *h)
40835
40836 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
40837 {
40838- return h->access.command_completed(h, q);
40839+ return h->access->command_completed(h, q);
40840 }
40841
40842 static inline bool interrupt_pending(struct ctlr_info *h)
40843 {
40844- return h->access.intr_pending(h);
40845+ return h->access->intr_pending(h);
40846 }
40847
40848 static inline long interrupt_not_for_us(struct ctlr_info *h)
40849 {
40850- return (h->access.intr_pending(h) == 0) ||
40851+ return (h->access->intr_pending(h) == 0) ||
40852 (h->interrupts_enabled == 0);
40853 }
40854
40855@@ -4316,7 +4316,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
40856 if (prod_index < 0)
40857 return -ENODEV;
40858 h->product_name = products[prod_index].product_name;
40859- h->access = *(products[prod_index].access);
40860+ h->access = products[prod_index].access;
40861
40862 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
40863 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
40864@@ -4598,7 +4598,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
40865
40866 assert_spin_locked(&lockup_detector_lock);
40867 remove_ctlr_from_lockup_detector_list(h);
40868- h->access.set_intr_mask(h, HPSA_INTR_OFF);
40869+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
40870 spin_lock_irqsave(&h->lock, flags);
40871 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
40872 spin_unlock_irqrestore(&h->lock, flags);
40873@@ -4775,7 +4775,7 @@ reinit_after_soft_reset:
40874 }
40875
40876 /* make sure the board interrupts are off */
40877- h->access.set_intr_mask(h, HPSA_INTR_OFF);
40878+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
40879
40880 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
40881 goto clean2;
40882@@ -4809,7 +4809,7 @@ reinit_after_soft_reset:
40883 * fake ones to scoop up any residual completions.
40884 */
40885 spin_lock_irqsave(&h->lock, flags);
40886- h->access.set_intr_mask(h, HPSA_INTR_OFF);
40887+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
40888 spin_unlock_irqrestore(&h->lock, flags);
40889 free_irqs(h);
40890 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
40891@@ -4828,9 +4828,9 @@ reinit_after_soft_reset:
40892 dev_info(&h->pdev->dev, "Board READY.\n");
40893 dev_info(&h->pdev->dev,
40894 "Waiting for stale completions to drain.\n");
40895- h->access.set_intr_mask(h, HPSA_INTR_ON);
40896+ h->access->set_intr_mask(h, HPSA_INTR_ON);
40897 msleep(10000);
40898- h->access.set_intr_mask(h, HPSA_INTR_OFF);
40899+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
40900
40901 rc = controller_reset_failed(h->cfgtable);
40902 if (rc)
40903@@ -4851,7 +4851,7 @@ reinit_after_soft_reset:
40904 }
40905
40906 /* Turn the interrupts on so we can service requests */
40907- h->access.set_intr_mask(h, HPSA_INTR_ON);
40908+ h->access->set_intr_mask(h, HPSA_INTR_ON);
40909
40910 hpsa_hba_inquiry(h);
40911 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
40912@@ -4903,7 +4903,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
40913 * To write all data in the battery backed cache to disks
40914 */
40915 hpsa_flush_cache(h);
40916- h->access.set_intr_mask(h, HPSA_INTR_OFF);
40917+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
40918 hpsa_free_irqs_and_disable_msix(h);
40919 }
40920
40921@@ -5071,7 +5071,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
40922 return;
40923 }
40924 /* Change the access methods to the performant access methods */
40925- h->access = SA5_performant_access;
40926+ h->access = &SA5_performant_access;
40927 h->transMethod = CFGTBL_Trans_Performant;
40928 }
40929
40930diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
40931index 9816479..c5d4e97 100644
40932--- a/drivers/scsi/hpsa.h
40933+++ b/drivers/scsi/hpsa.h
40934@@ -79,7 +79,7 @@ struct ctlr_info {
40935 unsigned int msix_vector;
40936 unsigned int msi_vector;
40937 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
40938- struct access_method access;
40939+ struct access_method *access;
40940
40941 /* queue and queue Info */
40942 struct list_head reqQ;
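
Rather than copying the chosen access_method table into every controller, hpsa now stores a pointer to it: call sites become h->access->fn() and the static method tables (SA5_performant_access and friends) can stay in read-only memory. A simplified sketch with stand-in names:

#include <stdio.h>

struct ctlr;
struct access_method { void (*set_intr_mask)(struct ctlr *h, int mask); };

struct ctlr { const struct access_method *access; };	/* was: by value */

static void sa5_set_intr_mask(struct ctlr *h, int mask)
{
	(void)h;
	printf("intr mask = %d\n", mask);
}

static const struct access_method SA5_access = {
	.set_intr_mask = sa5_set_intr_mask,	/* table lives in .rodata */
};

int main(void)
{
	struct ctlr h = { .access = &SA5_access };
	h.access->set_intr_mask(&h, 0);		/* was h.access.set_intr_mask */
	return 0;
}
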
40943diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
40944index c772d8d..35c362c 100644
40945--- a/drivers/scsi/libfc/fc_exch.c
40946+++ b/drivers/scsi/libfc/fc_exch.c
40947@@ -100,12 +100,12 @@ struct fc_exch_mgr {
40948 u16 pool_max_index;
40949
40950 struct {
40951- atomic_t no_free_exch;
40952- atomic_t no_free_exch_xid;
40953- atomic_t xid_not_found;
40954- atomic_t xid_busy;
40955- atomic_t seq_not_found;
40956- atomic_t non_bls_resp;
40957+ atomic_unchecked_t no_free_exch;
40958+ atomic_unchecked_t no_free_exch_xid;
40959+ atomic_unchecked_t xid_not_found;
40960+ atomic_unchecked_t xid_busy;
40961+ atomic_unchecked_t seq_not_found;
40962+ atomic_unchecked_t non_bls_resp;
40963 } stats;
40964 };
40965
40966@@ -725,7 +725,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
40967 /* allocate memory for exchange */
40968 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
40969 if (!ep) {
40970- atomic_inc(&mp->stats.no_free_exch);
40971+ atomic_inc_unchecked(&mp->stats.no_free_exch);
40972 goto out;
40973 }
40974 memset(ep, 0, sizeof(*ep));
40975@@ -786,7 +786,7 @@ out:
40976 return ep;
40977 err:
40978 spin_unlock_bh(&pool->lock);
40979- atomic_inc(&mp->stats.no_free_exch_xid);
40980+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
40981 mempool_free(ep, mp->ep_pool);
40982 return NULL;
40983 }
40984@@ -929,7 +929,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40985 xid = ntohs(fh->fh_ox_id); /* we originated exch */
40986 ep = fc_exch_find(mp, xid);
40987 if (!ep) {
40988- atomic_inc(&mp->stats.xid_not_found);
40989+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40990 reject = FC_RJT_OX_ID;
40991 goto out;
40992 }
40993@@ -959,7 +959,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40994 ep = fc_exch_find(mp, xid);
40995 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
40996 if (ep) {
40997- atomic_inc(&mp->stats.xid_busy);
40998+ atomic_inc_unchecked(&mp->stats.xid_busy);
40999 reject = FC_RJT_RX_ID;
41000 goto rel;
41001 }
41002@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41003 }
41004 xid = ep->xid; /* get our XID */
41005 } else if (!ep) {
41006- atomic_inc(&mp->stats.xid_not_found);
41007+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41008 reject = FC_RJT_RX_ID; /* XID not found */
41009 goto out;
41010 }
41011@@ -987,7 +987,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
41012 } else {
41013 sp = &ep->seq;
41014 if (sp->id != fh->fh_seq_id) {
41015- atomic_inc(&mp->stats.seq_not_found);
41016+ atomic_inc_unchecked(&mp->stats.seq_not_found);
41017 if (f_ctl & FC_FC_END_SEQ) {
41018 /*
41019 * Update sequence_id based on incoming last
41020@@ -1437,22 +1437,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41021
41022 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
41023 if (!ep) {
41024- atomic_inc(&mp->stats.xid_not_found);
41025+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41026 goto out;
41027 }
41028 if (ep->esb_stat & ESB_ST_COMPLETE) {
41029- atomic_inc(&mp->stats.xid_not_found);
41030+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41031 goto rel;
41032 }
41033 if (ep->rxid == FC_XID_UNKNOWN)
41034 ep->rxid = ntohs(fh->fh_rx_id);
41035 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
41036- atomic_inc(&mp->stats.xid_not_found);
41037+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41038 goto rel;
41039 }
41040 if (ep->did != ntoh24(fh->fh_s_id) &&
41041 ep->did != FC_FID_FLOGI) {
41042- atomic_inc(&mp->stats.xid_not_found);
41043+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41044 goto rel;
41045 }
41046 sof = fr_sof(fp);
41047@@ -1461,7 +1461,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41048 sp->ssb_stat |= SSB_ST_RESP;
41049 sp->id = fh->fh_seq_id;
41050 } else if (sp->id != fh->fh_seq_id) {
41051- atomic_inc(&mp->stats.seq_not_found);
41052+ atomic_inc_unchecked(&mp->stats.seq_not_found);
41053 goto rel;
41054 }
41055
41056@@ -1525,9 +1525,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
41057 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
41058
41059 if (!sp)
41060- atomic_inc(&mp->stats.xid_not_found);
41061+ atomic_inc_unchecked(&mp->stats.xid_not_found);
41062 else
41063- atomic_inc(&mp->stats.non_bls_resp);
41064+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
41065
41066 fc_frame_free(fp);
41067 }
41068@@ -2174,13 +2174,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
41069
41070 list_for_each_entry(ema, &lport->ema_list, ema_list) {
41071 mp = ema->mp;
41072- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
41073+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
41074 st->fc_no_free_exch_xid +=
41075- atomic_read(&mp->stats.no_free_exch_xid);
41076- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
41077- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
41078- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
41079- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
41080+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
41081+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
41082+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
41083+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
41084+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
41085 }
41086 }
41087 EXPORT_SYMBOL(fc_exch_update_stats);
41088diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
41089index bdb81cd..d3c7c2c 100644
41090--- a/drivers/scsi/libsas/sas_ata.c
41091+++ b/drivers/scsi/libsas/sas_ata.c
41092@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
41093 .postreset = ata_std_postreset,
41094 .error_handler = ata_std_error_handler,
41095 .post_internal_cmd = sas_ata_post_internal,
41096- .qc_defer = ata_std_qc_defer,
41097+ .qc_defer = ata_std_qc_defer,
41098 .qc_prep = ata_noop_qc_prep,
41099 .qc_issue = sas_ata_qc_issue,
41100 .qc_fill_rtf = sas_ata_qc_fill_rtf,
41101diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
41102index df4c13a..a51e90c 100644
41103--- a/drivers/scsi/lpfc/lpfc.h
41104+++ b/drivers/scsi/lpfc/lpfc.h
41105@@ -424,7 +424,7 @@ struct lpfc_vport {
41106 struct dentry *debug_nodelist;
41107 struct dentry *vport_debugfs_root;
41108 struct lpfc_debugfs_trc *disc_trc;
41109- atomic_t disc_trc_cnt;
41110+ atomic_unchecked_t disc_trc_cnt;
41111 #endif
41112 uint8_t stat_data_enabled;
41113 uint8_t stat_data_blocked;
41114@@ -842,8 +842,8 @@ struct lpfc_hba {
41115 struct timer_list fabric_block_timer;
41116 unsigned long bit_flags;
41117 #define FABRIC_COMANDS_BLOCKED 0
41118- atomic_t num_rsrc_err;
41119- atomic_t num_cmd_success;
41120+ atomic_unchecked_t num_rsrc_err;
41121+ atomic_unchecked_t num_cmd_success;
41122 unsigned long last_rsrc_error_time;
41123 unsigned long last_ramp_down_time;
41124 unsigned long last_ramp_up_time;
41125@@ -879,7 +879,7 @@ struct lpfc_hba {
41126
41127 struct dentry *debug_slow_ring_trc;
41128 struct lpfc_debugfs_trc *slow_ring_trc;
41129- atomic_t slow_ring_trc_cnt;
41130+ atomic_unchecked_t slow_ring_trc_cnt;
41131 /* iDiag debugfs sub-directory */
41132 struct dentry *idiag_root;
41133 struct dentry *idiag_pci_cfg;
41134diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
41135index f63f5ff..de29189 100644
41136--- a/drivers/scsi/lpfc/lpfc_debugfs.c
41137+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
41138@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
41139
41140 #include <linux/debugfs.h>
41141
41142-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41143+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41144 static unsigned long lpfc_debugfs_start_time = 0L;
41145
41146 /* iDiag */
41147@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
41148 lpfc_debugfs_enable = 0;
41149
41150 len = 0;
41151- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
41152+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
41153 (lpfc_debugfs_max_disc_trc - 1);
41154 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
41155 dtp = vport->disc_trc + i;
41156@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
41157 lpfc_debugfs_enable = 0;
41158
41159 len = 0;
41160- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
41161+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
41162 (lpfc_debugfs_max_slow_ring_trc - 1);
41163 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
41164 dtp = phba->slow_ring_trc + i;
41165@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
41166 !vport || !vport->disc_trc)
41167 return;
41168
41169- index = atomic_inc_return(&vport->disc_trc_cnt) &
41170+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
41171 (lpfc_debugfs_max_disc_trc - 1);
41172 dtp = vport->disc_trc + index;
41173 dtp->fmt = fmt;
41174 dtp->data1 = data1;
41175 dtp->data2 = data2;
41176 dtp->data3 = data3;
41177- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41178+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41179 dtp->jif = jiffies;
41180 #endif
41181 return;
41182@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
41183 !phba || !phba->slow_ring_trc)
41184 return;
41185
41186- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
41187+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
41188 (lpfc_debugfs_max_slow_ring_trc - 1);
41189 dtp = phba->slow_ring_trc + index;
41190 dtp->fmt = fmt;
41191 dtp->data1 = data1;
41192 dtp->data2 = data2;
41193 dtp->data3 = data3;
41194- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41195+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41196 dtp->jif = jiffies;
41197 #endif
41198 return;
41199@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41200 "slow_ring buffer\n");
41201 goto debug_failed;
41202 }
41203- atomic_set(&phba->slow_ring_trc_cnt, 0);
41204+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
41205 memset(phba->slow_ring_trc, 0,
41206 (sizeof(struct lpfc_debugfs_trc) *
41207 lpfc_debugfs_max_slow_ring_trc));
41208@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41209 "buffer\n");
41210 goto debug_failed;
41211 }
41212- atomic_set(&vport->disc_trc_cnt, 0);
41213+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
41214
41215 snprintf(name, sizeof(name), "discovery_trace");
41216 vport->debug_disc_trc =
41217diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
41218index 89ad558..76956c4 100644
41219--- a/drivers/scsi/lpfc/lpfc_init.c
41220+++ b/drivers/scsi/lpfc/lpfc_init.c
41221@@ -10618,8 +10618,10 @@ lpfc_init(void)
41222 "misc_register returned with status %d", error);
41223
41224 if (lpfc_enable_npiv) {
41225- lpfc_transport_functions.vport_create = lpfc_vport_create;
41226- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41227+ pax_open_kernel();
41228+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
41229+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41230+ pax_close_kernel();
41231 }
41232 lpfc_transport_template =
41233 fc_attach_transport(&lpfc_transport_functions);
41234diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
41235index 60e5a17..ff7a793 100644
41236--- a/drivers/scsi/lpfc/lpfc_scsi.c
41237+++ b/drivers/scsi/lpfc/lpfc_scsi.c
41238@@ -305,7 +305,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
41239 uint32_t evt_posted;
41240
41241 spin_lock_irqsave(&phba->hbalock, flags);
41242- atomic_inc(&phba->num_rsrc_err);
41243+ atomic_inc_unchecked(&phba->num_rsrc_err);
41244 phba->last_rsrc_error_time = jiffies;
41245
41246 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
41247@@ -346,7 +346,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
41248 unsigned long flags;
41249 struct lpfc_hba *phba = vport->phba;
41250 uint32_t evt_posted;
41251- atomic_inc(&phba->num_cmd_success);
41252+ atomic_inc_unchecked(&phba->num_cmd_success);
41253
41254 if (vport->cfg_lun_queue_depth <= queue_depth)
41255 return;
41256@@ -390,8 +390,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41257 unsigned long num_rsrc_err, num_cmd_success;
41258 int i;
41259
41260- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
41261- num_cmd_success = atomic_read(&phba->num_cmd_success);
41262+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
41263+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
41264
41265 /*
41266 * The error and success command counters are global per
41267@@ -419,8 +419,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41268 }
41269 }
41270 lpfc_destroy_vport_work_array(phba, vports);
41271- atomic_set(&phba->num_rsrc_err, 0);
41272- atomic_set(&phba->num_cmd_success, 0);
41273+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
41274+ atomic_set_unchecked(&phba->num_cmd_success, 0);
41275 }
41276
41277 /**
41278@@ -454,8 +454,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
41279 }
41280 }
41281 lpfc_destroy_vport_work_array(phba, vports);
41282- atomic_set(&phba->num_rsrc_err, 0);
41283- atomic_set(&phba->num_cmd_success, 0);
41284+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
41285+ atomic_set_unchecked(&phba->num_cmd_success, 0);
41286 }
41287
41288 /**
41289diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
41290index b46f5e9..c4c4ccb 100644
41291--- a/drivers/scsi/pmcraid.c
41292+++ b/drivers/scsi/pmcraid.c
41293@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
41294 res->scsi_dev = scsi_dev;
41295 scsi_dev->hostdata = res;
41296 res->change_detected = 0;
41297- atomic_set(&res->read_failures, 0);
41298- atomic_set(&res->write_failures, 0);
41299+ atomic_set_unchecked(&res->read_failures, 0);
41300+ atomic_set_unchecked(&res->write_failures, 0);
41301 rc = 0;
41302 }
41303 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
41304@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
41305
41306 /* If this was a SCSI read/write command keep count of errors */
41307 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
41308- atomic_inc(&res->read_failures);
41309+ atomic_inc_unchecked(&res->read_failures);
41310 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
41311- atomic_inc(&res->write_failures);
41312+ atomic_inc_unchecked(&res->write_failures);
41313
41314 if (!RES_IS_GSCSI(res->cfg_entry) &&
41315 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
41316@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
41317 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
41318 * hrrq_id assigned here in queuecommand
41319 */
41320- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
41321+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
41322 pinstance->num_hrrq;
41323 cmd->cmd_done = pmcraid_io_done;
41324
41325@@ -3859,7 +3859,7 @@ static long pmcraid_ioctl_passthrough(
41326 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
41327 * hrrq_id assigned here in queuecommand
41328 */
41329- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
41330+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
41331 pinstance->num_hrrq;
41332
41333 if (request_size) {
41334@@ -4497,7 +4497,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
41335
41336 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
41337 /* add resources only after host is added into system */
41338- if (!atomic_read(&pinstance->expose_resources))
41339+ if (!atomic_read_unchecked(&pinstance->expose_resources))
41340 return;
41341
41342 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
41343@@ -5324,8 +5324,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
41344 init_waitqueue_head(&pinstance->reset_wait_q);
41345
41346 atomic_set(&pinstance->outstanding_cmds, 0);
41347- atomic_set(&pinstance->last_message_id, 0);
41348- atomic_set(&pinstance->expose_resources, 0);
41349+ atomic_set_unchecked(&pinstance->last_message_id, 0);
41350+ atomic_set_unchecked(&pinstance->expose_resources, 0);
41351
41352 INIT_LIST_HEAD(&pinstance->free_res_q);
41353 INIT_LIST_HEAD(&pinstance->used_res_q);
41354@@ -6038,7 +6038,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
41355 /* Schedule worker thread to handle CCN and take care of adding and
41356 * removing devices to OS
41357 */
41358- atomic_set(&pinstance->expose_resources, 1);
41359+ atomic_set_unchecked(&pinstance->expose_resources, 1);
41360 schedule_work(&pinstance->worker_q);
41361 return rc;
41362
41363diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
41364index e1d150f..6c6df44 100644
41365--- a/drivers/scsi/pmcraid.h
41366+++ b/drivers/scsi/pmcraid.h
41367@@ -748,7 +748,7 @@ struct pmcraid_instance {
41368 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
41369
41370 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
41371- atomic_t last_message_id;
41372+ atomic_unchecked_t last_message_id;
41373
41374 /* configuration table */
41375 struct pmcraid_config_table *cfg_table;
41376@@ -777,7 +777,7 @@ struct pmcraid_instance {
41377 atomic_t outstanding_cmds;
41378
41379 /* should add/delete resources to mid-layer now ?*/
41380- atomic_t expose_resources;
41381+ atomic_unchecked_t expose_resources;
41382
41383
41384
41385@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
41386 struct pmcraid_config_table_entry_ext cfg_entry_ext;
41387 };
41388 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41389- atomic_t read_failures; /* count of failed READ commands */
41390- atomic_t write_failures; /* count of failed WRITE commands */
41391+ atomic_unchecked_t read_failures; /* count of failed READ commands */
41392+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41393
41394 /* To indicate add/delete/modify during CCN */
41395 u8 change_detected;
41396diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
41397index 83d7984..a27d947 100644
41398--- a/drivers/scsi/qla2xxx/qla_attr.c
41399+++ b/drivers/scsi/qla2xxx/qla_attr.c
41400@@ -1969,7 +1969,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
41401 return 0;
41402 }
41403
41404-struct fc_function_template qla2xxx_transport_functions = {
41405+fc_function_template_no_const qla2xxx_transport_functions = {
41406
41407 .show_host_node_name = 1,
41408 .show_host_port_name = 1,
41409@@ -2016,7 +2016,7 @@ struct fc_function_template qla2xxx_transport_functions = {
41410 .bsg_timeout = qla24xx_bsg_timeout,
41411 };
41412
41413-struct fc_function_template qla2xxx_transport_vport_functions = {
41414+fc_function_template_no_const qla2xxx_transport_vport_functions = {
41415
41416 .show_host_node_name = 1,
41417 .show_host_port_name = 1,
41418diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
41419index 2411d1a..4673766 100644
41420--- a/drivers/scsi/qla2xxx/qla_gbl.h
41421+++ b/drivers/scsi/qla2xxx/qla_gbl.h
41422@@ -515,8 +515,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
41423 struct device_attribute;
41424 extern struct device_attribute *qla2x00_host_attrs[];
41425 struct fc_function_template;
41426-extern struct fc_function_template qla2xxx_transport_functions;
41427-extern struct fc_function_template qla2xxx_transport_vport_functions;
41428+extern fc_function_template_no_const qla2xxx_transport_functions;
41429+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
41430 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
41431 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
41432 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
41433diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
41434index 10d23f8..a7d5d4c 100644
41435--- a/drivers/scsi/qla2xxx/qla_os.c
41436+++ b/drivers/scsi/qla2xxx/qla_os.c
41437@@ -1472,8 +1472,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
41438 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
41439 /* Ok, a 64bit DMA mask is applicable. */
41440 ha->flags.enable_64bit_addressing = 1;
41441- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
41442- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
41443+ pax_open_kernel();
41444+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
41445+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
41446+ pax_close_kernel();
41447 return;
41448 }
41449 }
41450diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41451index 329d553..f20d31d 100644
41452--- a/drivers/scsi/qla4xxx/ql4_def.h
41453+++ b/drivers/scsi/qla4xxx/ql4_def.h
41454@@ -273,7 +273,7 @@ struct ddb_entry {
41455 * (4000 only) */
41456 atomic_t relogin_timer; /* Max Time to wait for
41457 * relogin to complete */
41458- atomic_t relogin_retry_count; /* Num of times relogin has been
41459+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
41460 * retried */
41461 uint32_t default_time2wait; /* Default Min time between
41462 * relogins (+aens) */
41463diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
41464index 4cec123..7c1329f 100644
41465--- a/drivers/scsi/qla4xxx/ql4_os.c
41466+++ b/drivers/scsi/qla4xxx/ql4_os.c
41467@@ -2621,12 +2621,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
41468 */
41469 if (!iscsi_is_session_online(cls_sess)) {
41470 /* Reset retry relogin timer */
41471- atomic_inc(&ddb_entry->relogin_retry_count);
41472+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
41473 DEBUG2(ql4_printk(KERN_INFO, ha,
41474 "%s: index[%d] relogin timed out-retrying"
41475 " relogin (%d), retry (%d)\n", __func__,
41476 ddb_entry->fw_ddb_index,
41477- atomic_read(&ddb_entry->relogin_retry_count),
41478+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
41479 ddb_entry->default_time2wait + 4));
41480 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
41481 atomic_set(&ddb_entry->retry_relogin_timer,
41482@@ -4738,7 +4738,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
41483
41484 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
41485 atomic_set(&ddb_entry->relogin_timer, 0);
41486- atomic_set(&ddb_entry->relogin_retry_count, 0);
41487+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41488 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
41489 ddb_entry->default_relogin_timeout =
41490 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
41491diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
41492index 2c0d0ec..4e8681a 100644
41493--- a/drivers/scsi/scsi.c
41494+++ b/drivers/scsi/scsi.c
41495@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
41496 unsigned long timeout;
41497 int rtn = 0;
41498
41499- atomic_inc(&cmd->device->iorequest_cnt);
41500+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41501
41502 /* check if the device is still usable */
41503 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
41504diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
41505index f1bf5af..f67e943 100644
41506--- a/drivers/scsi/scsi_lib.c
41507+++ b/drivers/scsi/scsi_lib.c
41508@@ -1454,7 +1454,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
41509 shost = sdev->host;
41510 scsi_init_cmd_errh(cmd);
41511 cmd->result = DID_NO_CONNECT << 16;
41512- atomic_inc(&cmd->device->iorequest_cnt);
41513+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41514
41515 /*
41516 * SCSI request completion path will do scsi_device_unbusy(),
41517@@ -1480,9 +1480,9 @@ static void scsi_softirq_done(struct request *rq)
41518
41519 INIT_LIST_HEAD(&cmd->eh_entry);
41520
41521- atomic_inc(&cmd->device->iodone_cnt);
41522+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
41523 if (cmd->result)
41524- atomic_inc(&cmd->device->ioerr_cnt);
41525+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
41526
41527 disposition = scsi_decide_disposition(cmd);
41528 if (disposition != SUCCESS &&
41529diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
41530index 931a7d9..0c2a754 100644
41531--- a/drivers/scsi/scsi_sysfs.c
41532+++ b/drivers/scsi/scsi_sysfs.c
41533@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
41534 char *buf) \
41535 { \
41536 struct scsi_device *sdev = to_scsi_device(dev); \
41537- unsigned long long count = atomic_read(&sdev->field); \
41538+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
41539 return snprintf(buf, 20, "0x%llx\n", count); \
41540 } \
41541 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
41542diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
41543index 84a1fdf..693b0d6 100644
41544--- a/drivers/scsi/scsi_tgt_lib.c
41545+++ b/drivers/scsi/scsi_tgt_lib.c
41546@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
41547 int err;
41548
41549 dprintk("%lx %u\n", uaddr, len);
41550- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
41551+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
41552 if (err) {
41553 /*
41554 * TODO: need to fixup sg_tablesize, max_segment_size,
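
The scsi_tgt_lib change restores the __user address-space annotation that the bare (void *) cast was discarding; sparse uses the marker to keep user and kernel pointers apart, and outside a sparse run it compiles away, as in this sketch:

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

static long map_user_buf(void __user *ubuf, unsigned long len)
{
	(void)ubuf;
	(void)len;	/* would be handed off to blk_rq_map_user() */
	return 0;
}

int main(void)
{
	unsigned long uaddr = 0x1000;	/* hypothetical user address */
	return (int)map_user_buf((void __user *)uaddr, 64);
}
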
41555diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
41556index e894ca7..de9d7660 100644
41557--- a/drivers/scsi/scsi_transport_fc.c
41558+++ b/drivers/scsi/scsi_transport_fc.c
41559@@ -498,7 +498,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
41560 * Netlink Infrastructure
41561 */
41562
41563-static atomic_t fc_event_seq;
41564+static atomic_unchecked_t fc_event_seq;
41565
41566 /**
41567 * fc_get_event_number - Obtain the next sequential FC event number
41568@@ -511,7 +511,7 @@ static atomic_t fc_event_seq;
41569 u32
41570 fc_get_event_number(void)
41571 {
41572- return atomic_add_return(1, &fc_event_seq);
41573+ return atomic_add_return_unchecked(1, &fc_event_seq);
41574 }
41575 EXPORT_SYMBOL(fc_get_event_number);
41576
41577@@ -659,7 +659,7 @@ static __init int fc_transport_init(void)
41578 {
41579 int error;
41580
41581- atomic_set(&fc_event_seq, 0);
41582+ atomic_set_unchecked(&fc_event_seq, 0);
41583
41584 error = transport_class_register(&fc_host_class);
41585 if (error)
41586@@ -849,7 +849,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
41587 char *cp;
41588
41589 *val = simple_strtoul(buf, &cp, 0);
41590- if ((*cp && (*cp != '\n')) || (*val < 0))
41591+ if (*cp && (*cp != '\n'))
41592 return -EINVAL;
41593 /*
41594 * Check for overflow; dev_loss_tmo is u32
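
In fc_str_to_dev_loss, *val is an unsigned long, so the removed "*val < 0" test could never be true; it was dead code that compilers flag under -Wtype-limits, and rejection of bad input is left to the trailing-character and overflow checks. The same effect in miniature:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	char *cp;
	unsigned long val = strtoul("-1", &cp, 0);

	if (val < 0)			/* always false for an unsigned type */
		puts("never reached");

	printf("%lu\n", val);		/* "-1" wrapped to ULONG_MAX */
	return 0;
}
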
41595diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
41596index 31969f2..2b348f0 100644
41597--- a/drivers/scsi/scsi_transport_iscsi.c
41598+++ b/drivers/scsi/scsi_transport_iscsi.c
41599@@ -79,7 +79,7 @@ struct iscsi_internal {
41600 struct transport_container session_cont;
41601 };
41602
41603-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
41604+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
41605 static struct workqueue_struct *iscsi_eh_timer_workq;
41606
41607 static DEFINE_IDA(iscsi_sess_ida);
41608@@ -1064,7 +1064,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
41609 int err;
41610
41611 ihost = shost->shost_data;
41612- session->sid = atomic_add_return(1, &iscsi_session_nr);
41613+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
41614
41615 if (target_id == ISCSI_MAX_TARGET) {
41616 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
41617@@ -2943,7 +2943,7 @@ static __init int iscsi_transport_init(void)
41618 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
41619 ISCSI_TRANSPORT_VERSION);
41620
41621- atomic_set(&iscsi_session_nr, 0);
41622+ atomic_set_unchecked(&iscsi_session_nr, 0);
41623
41624 err = class_register(&iscsi_transport_class);
41625 if (err)
41626diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
41627index f379c7f..e8fc69c 100644
41628--- a/drivers/scsi/scsi_transport_srp.c
41629+++ b/drivers/scsi/scsi_transport_srp.c
41630@@ -33,7 +33,7 @@
41631 #include "scsi_transport_srp_internal.h"
41632
41633 struct srp_host_attrs {
41634- atomic_t next_port_id;
41635+ atomic_unchecked_t next_port_id;
41636 };
41637 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
41638
41639@@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
41640 struct Scsi_Host *shost = dev_to_shost(dev);
41641 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
41642
41643- atomic_set(&srp_host->next_port_id, 0);
41644+ atomic_set_unchecked(&srp_host->next_port_id, 0);
41645 return 0;
41646 }
41647
41648@@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
41649 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
41650 rport->roles = ids->roles;
41651
41652- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
41653+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
41654 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
41655
41656 transport_setup_device(&rport->dev);
41657diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
41658index 7992635..609faf8 100644
41659--- a/drivers/scsi/sd.c
41660+++ b/drivers/scsi/sd.c
41661@@ -2909,7 +2909,7 @@ static int sd_probe(struct device *dev)
41662 sdkp->disk = gd;
41663 sdkp->index = index;
41664 atomic_set(&sdkp->openers, 0);
41665- atomic_set(&sdkp->device->ioerr_cnt, 0);
41666+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
41667
41668 if (!sdp->request_queue->rq_timeout) {
41669 if (sdp->type != TYPE_MOD)
41670diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
41671index be2c9a6..275525c 100644
41672--- a/drivers/scsi/sg.c
41673+++ b/drivers/scsi/sg.c
41674@@ -1101,7 +1101,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
41675 sdp->disk->disk_name,
41676 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
41677 NULL,
41678- (char *)arg);
41679+ (char __user *)arg);
41680 case BLKTRACESTART:
41681 return blk_trace_startstop(sdp->device->request_queue, 1);
41682 case BLKTRACESTOP:
41683diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
41684index 19ee901..6e8c2ef 100644
41685--- a/drivers/spi/spi.c
41686+++ b/drivers/spi/spi.c
41687@@ -1616,7 +1616,7 @@ int spi_bus_unlock(struct spi_master *master)
41688 EXPORT_SYMBOL_GPL(spi_bus_unlock);
41689
41690 /* portable code must never pass more than 32 bytes */
41691-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
41692+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
41693
41694 static u8 *buf;
41695
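
The kernel's max() macro type-checks its operands, so pairing the plain int literal 32 with SMP_CACHE_BYTES (usually an unsigned expression) is a type mismatch; writing 32UL makes both operands unsigned long. A simplified version of the strictly typed macro, assuming the usual statement-expression form from linux/kernel.h:

#include <stdio.h>

#define max(x, y) ({				\
	__typeof__(x) _x = (x);			\
	__typeof__(y) _y = (y);			\
	(void)(&_x == &_y);	/* warns if the types differ */	\
	_x > _y ? _x : _y; })

int main(void)
{
	unsigned long cache_bytes = 64;			/* SMP_CACHE_BYTES stand-in */
	unsigned long bufsiz = max(32UL, cache_bytes);	/* types agree */

	printf("%lu\n", bufsiz);
	return 0;
}
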
41696diff --git a/drivers/staging/iio/iio_hwmon.c b/drivers/staging/iio/iio_hwmon.c
41697index c7a5f97..71ecd35 100644
41698--- a/drivers/staging/iio/iio_hwmon.c
41699+++ b/drivers/staging/iio/iio_hwmon.c
41700@@ -72,7 +72,7 @@ static void iio_hwmon_free_attrs(struct iio_hwmon_state *st)
41701 static int iio_hwmon_probe(struct platform_device *pdev)
41702 {
41703 struct iio_hwmon_state *st;
41704- struct sensor_device_attribute *a;
41705+ sensor_device_attribute_no_const *a;
41706 int ret, i;
41707 int in_i = 1, temp_i = 1, curr_i = 1;
41708 enum iio_chan_type type;
41709diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
41710index 34afc16..ffe44dd 100644
41711--- a/drivers/staging/octeon/ethernet-rx.c
41712+++ b/drivers/staging/octeon/ethernet-rx.c
41713@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
41714 /* Increment RX stats for virtual ports */
41715 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
41716 #ifdef CONFIG_64BIT
41717- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
41718- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
41719+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
41720+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
41721 #else
41722- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
41723- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
41724+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
41725+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
41726 #endif
41727 }
41728 netif_receive_skb(skb);
41729@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
41730 dev->name);
41731 */
41732 #ifdef CONFIG_64BIT
41733- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
41734+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
41735 #else
41736- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
41737+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
41738 #endif
41739 dev_kfree_skb_irq(skb);
41740 }
41741diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
41742index ef32dc1..a159d68 100644
41743--- a/drivers/staging/octeon/ethernet.c
41744+++ b/drivers/staging/octeon/ethernet.c
41745@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
41746 * since the RX tasklet also increments it.
41747 */
41748 #ifdef CONFIG_64BIT
41749- atomic64_add(rx_status.dropped_packets,
41750- (atomic64_t *)&priv->stats.rx_dropped);
41751+ atomic64_add_unchecked(rx_status.dropped_packets,
41752+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
41753 #else
41754- atomic_add(rx_status.dropped_packets,
41755- (atomic_t *)&priv->stats.rx_dropped);
41756+ atomic_add_unchecked(rx_status.dropped_packets,
41757+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
41758 #endif
41759 }
41760
41761diff --git a/drivers/staging/ramster/tmem.c b/drivers/staging/ramster/tmem.c
41762index a2b7e03..aaf3630 100644
41763--- a/drivers/staging/ramster/tmem.c
41764+++ b/drivers/staging/ramster/tmem.c
41765@@ -50,25 +50,25 @@
41766 * A tmem host implementation must use this function to register callbacks
41767 * for memory allocation.
41768 */
41769-static struct tmem_hostops tmem_hostops;
41770+static struct tmem_hostops *tmem_hostops;
41771
41772 static void tmem_objnode_tree_init(void);
41773
41774 void tmem_register_hostops(struct tmem_hostops *m)
41775 {
41776 tmem_objnode_tree_init();
41777- tmem_hostops = *m;
41778+ tmem_hostops = m;
41779 }
41780
41781 /*
41782 * A tmem host implementation must use this function to register
41783 * callbacks for a page-accessible memory (PAM) implementation.
41784 */
41785-static struct tmem_pamops tmem_pamops;
41786+static struct tmem_pamops *tmem_pamops;
41787
41788 void tmem_register_pamops(struct tmem_pamops *m)
41789 {
41790- tmem_pamops = *m;
41791+ tmem_pamops = m;
41792 }
41793
41794 /*
41795@@ -174,7 +174,7 @@ static void tmem_obj_init(struct tmem_obj *obj, struct tmem_hashbucket *hb,
41796 obj->pampd_count = 0;
41797 #ifdef CONFIG_RAMSTER
41798 if (tmem_pamops.new_obj != NULL)
41799- (*tmem_pamops.new_obj)(obj);
41800+ (tmem_pamops->new_obj)(obj);
41801 #endif
41802 SET_SENTINEL(obj, OBJ);
41803
41804@@ -210,7 +210,7 @@ static void tmem_pool_flush(struct tmem_pool *pool, bool destroy)
41805 rbnode = rb_next(rbnode);
41806 tmem_pampd_destroy_all_in_obj(obj, true);
41807 tmem_obj_free(obj, hb);
41808- (*tmem_hostops.obj_free)(obj, pool);
41809+ (tmem_hostops->obj_free)(obj, pool);
41810 }
41811 spin_unlock(&hb->lock);
41812 }
41813@@ -261,7 +261,7 @@ static struct tmem_objnode *tmem_objnode_alloc(struct tmem_obj *obj)
41814 ASSERT_SENTINEL(obj, OBJ);
41815 BUG_ON(obj->pool == NULL);
41816 ASSERT_SENTINEL(obj->pool, POOL);
41817- objnode = (*tmem_hostops.objnode_alloc)(obj->pool);
41818+ objnode = (tmem_hostops->objnode_alloc)(obj->pool);
41819 if (unlikely(objnode == NULL))
41820 goto out;
41821 objnode->obj = obj;
41822@@ -290,7 +290,7 @@ static void tmem_objnode_free(struct tmem_objnode *objnode)
41823 ASSERT_SENTINEL(pool, POOL);
41824 objnode->obj->objnode_count--;
41825 objnode->obj = NULL;
41826- (*tmem_hostops.objnode_free)(objnode, pool);
41827+ (tmem_hostops->objnode_free)(objnode, pool);
41828 }
41829
41830 /*
41831@@ -348,7 +348,7 @@ static void *tmem_pampd_replace_in_obj(struct tmem_obj *obj, uint32_t index,
41832 void *old_pampd = *(void **)slot;
41833 *(void **)slot = new_pampd;
41834 if (!no_free)
41835- (*tmem_pamops.free)(old_pampd, obj->pool,
41836+ (tmem_pamops->free)(old_pampd, obj->pool,
41837 NULL, 0, false);
41838 ret = new_pampd;
41839 }
41840@@ -505,7 +505,7 @@ static void tmem_objnode_node_destroy(struct tmem_obj *obj,
41841 if (objnode->slots[i]) {
41842 if (ht == 1) {
41843 obj->pampd_count--;
41844- (*tmem_pamops.free)(objnode->slots[i],
41845+ (tmem_pamops->free)(objnode->slots[i],
41846 obj->pool, NULL, 0, true);
41847 objnode->slots[i] = NULL;
41848 continue;
41849@@ -524,7 +524,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
41850 return;
41851 if (obj->objnode_tree_height == 0) {
41852 obj->pampd_count--;
41853- (*tmem_pamops.free)(obj->objnode_tree_root,
41854+ (tmem_pamops->free)(obj->objnode_tree_root,
41855 obj->pool, NULL, 0, true);
41856 } else {
41857 tmem_objnode_node_destroy(obj, obj->objnode_tree_root,
41858@@ -535,7 +535,7 @@ static void tmem_pampd_destroy_all_in_obj(struct tmem_obj *obj,
41859 obj->objnode_tree_root = NULL;
41860 #ifdef CONFIG_RAMSTER
41861 if (tmem_pamops.free_obj != NULL)
41862- (*tmem_pamops.free_obj)(obj->pool, obj, pool_destroy);
41863+ (tmem_pamops->free_obj)(obj->pool, obj, pool_destroy);
41864 #endif
41865 }
41866
41867@@ -574,7 +574,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
41868 /* if found, is a dup put, flush the old one */
41869 pampd_del = tmem_pampd_delete_from_obj(obj, index);
41870 BUG_ON(pampd_del != pampd);
41871- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
41872+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
41873 if (obj->pampd_count == 0) {
41874 objnew = obj;
41875 objfound = NULL;
41876@@ -582,7 +582,7 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
41877 pampd = NULL;
41878 }
41879 } else {
41880- obj = objnew = (*tmem_hostops.obj_alloc)(pool);
41881+ obj = objnew = (tmem_hostops->obj_alloc)(pool);
41882 if (unlikely(obj == NULL)) {
41883 ret = -ENOMEM;
41884 goto out;
41885@@ -597,16 +597,16 @@ int tmem_put(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
41886 if (unlikely(ret == -ENOMEM))
41887 /* may have partially built objnode tree ("stump") */
41888 goto delete_and_free;
41889- (*tmem_pamops.create_finish)(pampd, is_ephemeral(pool));
41890+ (tmem_pamops->create_finish)(pampd, is_ephemeral(pool));
41891 goto out;
41892
41893 delete_and_free:
41894 (void)tmem_pampd_delete_from_obj(obj, index);
41895 if (pampd)
41896- (*tmem_pamops.free)(pampd, pool, NULL, 0, true);
41897+ (tmem_pamops->free)(pampd, pool, NULL, 0, true);
41898 if (objnew) {
41899 tmem_obj_free(objnew, hb);
41900- (*tmem_hostops.obj_free)(objnew, pool);
41901+ (tmem_hostops->obj_free)(objnew, pool);
41902 }
41903 out:
41904 spin_unlock(&hb->lock);
41905@@ -651,7 +651,7 @@ void tmem_localify_finish(struct tmem_obj *obj, uint32_t index,
41906 if (pampd != NULL) {
41907 BUG_ON(obj == NULL);
41908 (void)tmem_pampd_replace_in_obj(obj, index, pampd, 1);
41909- (*tmem_pamops.create_finish)(pampd, is_ephemeral(obj->pool));
41910+ (tmem_pamops->create_finish)(pampd, is_ephemeral(obj->pool));
41911 } else if (delete) {
41912 BUG_ON(obj == NULL);
41913 (void)tmem_pampd_delete_from_obj(obj, index);
41914@@ -671,7 +671,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
41915 int ret = 0;
41916
41917 if (!is_ephemeral(pool))
41918- new_pampd = (*tmem_pamops.repatriate_preload)(
41919+ new_pampd = (tmem_pamops->repatriate_preload)(
41920 old_pampd, pool, oidp, index, &intransit);
41921 if (intransit)
41922 ret = -EAGAIN;
41923@@ -680,7 +680,7 @@ static int tmem_repatriate(void **ppampd, struct tmem_hashbucket *hb,
41924 /* must release the hb->lock else repatriate can't sleep */
41925 spin_unlock(&hb->lock);
41926 if (!intransit)
41927- ret = (*tmem_pamops.repatriate)(old_pampd, new_pampd, pool,
41928+ ret = (tmem_pamops->repatriate)(old_pampd, new_pampd, pool,
41929 oidp, index, free, data);
41930 if (ret == -EAGAIN) {
41931 /* rare I think, but should cond_resched()??? */
41932@@ -714,7 +714,7 @@ int tmem_replace(struct tmem_pool *pool, struct tmem_oid *oidp,
41933 new_pampd = tmem_pampd_replace_in_obj(obj, index, new_pampd, 0);
41934 /* if we bug here, pamops wasn't properly set up for ramster */
41935 BUG_ON(tmem_pamops.replace_in_obj == NULL);
41936- ret = (*tmem_pamops.replace_in_obj)(new_pampd, obj);
41937+ ret = (tmem_pamops->replace_in_obj)(new_pampd, obj);
41938 out:
41939 spin_unlock(&hb->lock);
41940 return ret;
41941@@ -776,15 +776,15 @@ int tmem_get(struct tmem_pool *pool, struct tmem_oid *oidp, uint32_t index,
41942 if (free) {
41943 if (obj->pampd_count == 0) {
41944 tmem_obj_free(obj, hb);
41945- (*tmem_hostops.obj_free)(obj, pool);
41946+ (tmem_hostops->obj_free)(obj, pool);
41947 obj = NULL;
41948 }
41949 }
41950 if (free)
41951- ret = (*tmem_pamops.get_data_and_free)(
41952+ ret = (tmem_pamops->get_data_and_free)(
41953 data, sizep, raw, pampd, pool, oidp, index);
41954 else
41955- ret = (*tmem_pamops.get_data)(
41956+ ret = (tmem_pamops->get_data)(
41957 data, sizep, raw, pampd, pool, oidp, index);
41958 if (ret < 0)
41959 goto out;
41960@@ -816,10 +816,10 @@ int tmem_flush_page(struct tmem_pool *pool,
41961 pampd = tmem_pampd_delete_from_obj(obj, index);
41962 if (pampd == NULL)
41963 goto out;
41964- (*tmem_pamops.free)(pampd, pool, oidp, index, true);
41965+ (tmem_pamops->free)(pampd, pool, oidp, index, true);
41966 if (obj->pampd_count == 0) {
41967 tmem_obj_free(obj, hb);
41968- (*tmem_hostops.obj_free)(obj, pool);
41969+ (tmem_hostops->obj_free)(obj, pool);
41970 }
41971 ret = 0;
41972
41973@@ -844,7 +844,7 @@ int tmem_flush_object(struct tmem_pool *pool, struct tmem_oid *oidp)
41974 goto out;
41975 tmem_pampd_destroy_all_in_obj(obj, false);
41976 tmem_obj_free(obj, hb);
41977- (*tmem_hostops.obj_free)(obj, pool);
41978+ (tmem_hostops->obj_free)(obj, pool);
41979 ret = 0;
41980
41981 out:
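
ramster's tmem core previously copied the registered hostops/pamops by value into static structs; keeping only the caller's pointer lets the host hand in a constifiable table, with the implied contract that the registered object outlives its use (trivially true for the static tables used in-kernel). Sketch of the register-by-pointer pattern:

#include <stdio.h>

struct tmem_hostops { void *(*obj_alloc)(void); };

static struct tmem_hostops *tmem_hostops;	/* was: a struct copy */

static void tmem_register_hostops(struct tmem_hostops *m)
{
	tmem_hostops = m;			/* was: tmem_hostops = *m; */
}

static void *zcache_obj_alloc(void)
{
	static int obj;
	return &obj;
}

static struct tmem_hostops zcache_hostops = { .obj_alloc = zcache_obj_alloc };

int main(void)
{
	tmem_register_hostops(&zcache_hostops);
	printf("%p\n", tmem_hostops->obj_alloc());
	return 0;
}
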
41982diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
41983index dc23395..cf7e9b1 100644
41984--- a/drivers/staging/rtl8712/rtl871x_io.h
41985+++ b/drivers/staging/rtl8712/rtl871x_io.h
41986@@ -108,7 +108,7 @@ struct _io_ops {
41987 u8 *pmem);
41988 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
41989 u8 *pmem);
41990-};
41991+} __no_const;
41992
41993 struct io_req {
41994 struct list_head list;
41995diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
41996index 1f5088b..0e59820 100644
41997--- a/drivers/staging/sbe-2t3e3/netdev.c
41998+++ b/drivers/staging/sbe-2t3e3/netdev.c
41999@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
42000 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
42001
42002 if (rlen)
42003- if (copy_to_user(data, &resp, rlen))
42004+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
42005 return -EFAULT;
42006
42007 return 0;
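
t3e3_if_config() reports a response length that originates from driver and device state; bounding it by sizeof resp before copy_to_user() means a bogus rlen can no longer copy stack memory past the response buffer back to userspace. A userspace sketch of the guard, with memcpy() standing in for copy_to_user():

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int give_to_user(char *dst, size_t dst_len)
{
	char resp[64];
	size_t rlen = 200;			/* hostile or buggy length */

	memset(resp, 0x41, sizeof resp);
	if (rlen)
		if (rlen > sizeof resp || rlen > dst_len)
			return -EFAULT;		/* reject, don't over-copy */
	memcpy(dst, resp, rlen);
	return 0;
}

int main(void)
{
	char buf[256];
	printf("%d\n", give_to_user(buf, sizeof buf));
	return 0;
}
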
42008diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42009index 5dddc4d..34fcb2f 100644
42010--- a/drivers/staging/usbip/vhci.h
42011+++ b/drivers/staging/usbip/vhci.h
42012@@ -83,7 +83,7 @@ struct vhci_hcd {
42013 unsigned resuming:1;
42014 unsigned long re_timeout;
42015
42016- atomic_t seqnum;
42017+ atomic_unchecked_t seqnum;
42018
42019 /*
42020 * NOTE:
42021diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42022index c3aa219..bf8b3de 100644
42023--- a/drivers/staging/usbip/vhci_hcd.c
42024+++ b/drivers/staging/usbip/vhci_hcd.c
42025@@ -451,7 +451,7 @@ static void vhci_tx_urb(struct urb *urb)
42026 return;
42027 }
42028
42029- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42030+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42031 if (priv->seqnum == 0xffff)
42032 dev_info(&urb->dev->dev, "seqnum max\n");
42033
42034@@ -703,7 +703,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42035 return -ENOMEM;
42036 }
42037
42038- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42039+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42040 if (unlink->seqnum == 0xffff)
42041 pr_info("seqnum max\n");
42042
42043@@ -907,7 +907,7 @@ static int vhci_start(struct usb_hcd *hcd)
42044 vdev->rhport = rhport;
42045 }
42046
42047- atomic_set(&vhci->seqnum, 0);
42048+ atomic_set_unchecked(&vhci->seqnum, 0);
42049 spin_lock_init(&vhci->lock);
42050
42051 hcd->power_budget = 0; /* no limit */
42052diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42053index ba5f1c0..11d8122 100644
42054--- a/drivers/staging/usbip/vhci_rx.c
42055+++ b/drivers/staging/usbip/vhci_rx.c
42056@@ -77,7 +77,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42057 if (!urb) {
42058 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
42059 pr_info("max seqnum %d\n",
42060- atomic_read(&the_controller->seqnum));
42061+ atomic_read_unchecked(&the_controller->seqnum));
42062 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42063 return;
42064 }
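
The seqnum conversions above use atomic_unchecked_t, grsecurity's opt-out from PaX REFCOUNT overflow detection: a USB/IP sequence number is a tag that is expected to wrap, so trapping on its overflow would be a false positive, while real reference counts stay instrumented. A short, illustrative C program showing why wraparound is harmless for a tag (the "seqnum max" message mirrors the driver's log; the mask is an assumption for the demo):

#include <stdio.h>

int main(void)
{
	/* Unsigned wraparound is well-defined in C; a tag only has to
	 * differ between in-flight messages, unlike a refcount, which
	 * must never wrap -- that is the distinction the _unchecked
	 * variants encode. */
	unsigned int seqnum = 0xfffffffdu;

	for (int i = 0; i < 5; i++) {
		seqnum++;
		if ((seqnum & 0xffff) == 0xffff)
			printf("seqnum max\n");
		printf("tag: %u\n", seqnum);
	}
	return 0;
}
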
42065diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42066index 5f13890..36a044b 100644
42067--- a/drivers/staging/vt6655/hostap.c
42068+++ b/drivers/staging/vt6655/hostap.c
42069@@ -73,14 +73,13 @@ static int msglevel =MSG_LEVEL_INFO;
42070 *
42071 */
42072
42073+static net_device_ops_no_const apdev_netdev_ops;
42074+
42075 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42076 {
42077 PSDevice apdev_priv;
42078 struct net_device *dev = pDevice->dev;
42079 int ret;
42080- const struct net_device_ops apdev_netdev_ops = {
42081- .ndo_start_xmit = pDevice->tx_80211,
42082- };
42083
42084 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
42085
42086@@ -92,6 +91,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42087 *apdev_priv = *pDevice;
42088 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
42089
42090+ /* only half broken now */
42091+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
42092 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
42093
42094 pDevice->apdev->type = ARPHRD_IEEE80211;
42095diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42096index 26a7d0e..897b083 100644
42097--- a/drivers/staging/vt6656/hostap.c
42098+++ b/drivers/staging/vt6656/hostap.c
42099@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
42100 *
42101 */
42102
42103+static net_device_ops_no_const apdev_netdev_ops;
42104+
42105 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42106 {
42107 PSDevice apdev_priv;
42108 struct net_device *dev = pDevice->dev;
42109 int ret;
42110- const struct net_device_ops apdev_netdev_ops = {
42111- .ndo_start_xmit = pDevice->tx_80211,
42112- };
42113
42114 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
42115
42116@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42117 *apdev_priv = *pDevice;
42118 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
42119
42120+ /* only half broken now */
42121+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
42122 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
42123
42124 pDevice->apdev->type = ARPHRD_IEEE80211;
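
Both vt665x hunks replace a const, function-local net_device_ops with a writable file-scope net_device_ops_no_const so ndo_start_xmit can still be bound per device at runtime; the patch author's own "/* only half broken now */" concedes that a mutable ops table remains a (reduced) target. The trade-off in plain C, all names hypothetical:

#include <stdio.h>

struct ops {
	void (*start_xmit)(void);
};

static void tx_impl(void) { puts("tx"); }

/* Fully const: lives in .rodata, cannot be overwritten at runtime,
 * but every member must be known at build time. */
static const struct ops const_ops = { .start_xmit = tx_impl };

/* Writable file-scope table (the *_no_const pattern): one callback
 * can be filled in late, at the cost of being writable again. */
static struct ops late_ops;

int main(void)
{
	const_ops.start_xmit();        /* bound at build time */
	late_ops.start_xmit = tx_impl; /* bound at "probe" time */
	late_ops.start_xmit();
	return 0;
}
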
42125diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
42126index 56c8e60..1920c63 100644
42127--- a/drivers/staging/zcache/tmem.c
42128+++ b/drivers/staging/zcache/tmem.c
42129@@ -39,7 +39,7 @@
42130 * A tmem host implementation must use this function to register callbacks
42131 * for memory allocation.
42132 */
42133-static struct tmem_hostops tmem_hostops;
42134+static tmem_hostops_no_const tmem_hostops;
42135
42136 static void tmem_objnode_tree_init(void);
42137
42138@@ -53,7 +53,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
42139 * A tmem host implementation must use this function to register
42140 * callbacks for a page-accessible memory (PAM) implementation
42141 */
42142-static struct tmem_pamops tmem_pamops;
42143+static tmem_pamops_no_const tmem_pamops;
42144
42145 void tmem_register_pamops(struct tmem_pamops *m)
42146 {
42147diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
42148index 0d4aa82..f7832d4 100644
42149--- a/drivers/staging/zcache/tmem.h
42150+++ b/drivers/staging/zcache/tmem.h
42151@@ -180,6 +180,7 @@ struct tmem_pamops {
42152 void (*new_obj)(struct tmem_obj *);
42153 int (*replace_in_obj)(void *, struct tmem_obj *);
42154 };
42155+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
42156 extern void tmem_register_pamops(struct tmem_pamops *m);
42157
42158 /* memory allocation methods provided by the host implementation */
42159@@ -189,6 +190,7 @@ struct tmem_hostops {
42160 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
42161 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
42162 };
42163+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
42164 extern void tmem_register_hostops(struct tmem_hostops *m);
42165
42166 /* core tmem accessor functions */
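
These typedefs are the standard idiom for grsecurity's constify gcc plugin: structures containing only function pointers are made const automatically, and __no_const exempts the few instances, such as the tables filled in by tmem_register_hostops()/tmem_register_pamops(), that must stay writable. A hedged sketch of the mechanics with the attribute defined away, as on a non-plugin build (a reimplementation for illustration, not the real header):

#include <stdio.h>

#ifndef __no_const
#define __no_const /* consumed by the constify plugin; empty otherwise */
#endif

struct hostops {
	void (*obj_free)(void);
};

/* The plugin would constify `struct hostops` globally; the typedef
 * carves out a writable variant for registration-time assignment. */
typedef struct hostops __no_const hostops_no_const;

static void obj_free_impl(void) { puts("obj_free"); }

static hostops_no_const hostops; /* writable: filled by register below */

static void register_hostops(struct hostops *m) { hostops = *m; }

int main(void)
{
	struct hostops mine = { .obj_free = obj_free_impl };

	register_hostops(&mine);
	hostops.obj_free();
	return 0;
}
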
42167diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
42168index 96f4981..4daaa7e 100644
42169--- a/drivers/target/target_core_device.c
42170+++ b/drivers/target/target_core_device.c
42171@@ -1370,7 +1370,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
42172 spin_lock_init(&dev->se_port_lock);
42173 spin_lock_init(&dev->se_tmr_lock);
42174 spin_lock_init(&dev->qf_cmd_lock);
42175- atomic_set(&dev->dev_ordered_id, 0);
42176+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
42177 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
42178 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
42179 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
42180diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
42181index bd587b7..173daf3 100644
42182--- a/drivers/target/target_core_transport.c
42183+++ b/drivers/target/target_core_transport.c
42184@@ -1077,7 +1077,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
42185 * Used to determine when ORDERED commands should go from
42186 * Dormant to Active status.
42187 */
42188- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
42189+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
42190 smp_mb__after_atomic_inc();
42191 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
42192 cmd->se_ordered_id, cmd->sam_task_attr,
42193diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
42194index b09c8d1f..c4225c0 100644
42195--- a/drivers/tty/cyclades.c
42196+++ b/drivers/tty/cyclades.c
42197@@ -1589,10 +1589,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
42198 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
42199 info->port.count);
42200 #endif
42201- info->port.count++;
42202+ atomic_inc(&info->port.count);
42203 #ifdef CY_DEBUG_COUNT
42204 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
42205- current->pid, info->port.count);
42206+ current->pid, atomic_read(&info->port.count));
42207 #endif
42208
42209 /*
42210@@ -3991,7 +3991,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
42211 for (j = 0; j < cy_card[i].nports; j++) {
42212 info = &cy_card[i].ports[j];
42213
42214- if (info->port.count) {
42215+ if (atomic_read(&info->port.count)) {
42216 /* XXX is the ldisc num worth this? */
42217 struct tty_struct *tty;
42218 struct tty_ldisc *ld;
42219diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
42220index 13ee53b..418d164 100644
42221--- a/drivers/tty/hvc/hvc_console.c
42222+++ b/drivers/tty/hvc/hvc_console.c
42223@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
42224
42225 spin_lock_irqsave(&hp->port.lock, flags);
42226 /* Check and then increment for fast path open. */
42227- if (hp->port.count++ > 0) {
42228+ if (atomic_inc_return(&hp->port.count) > 1) {
42229 spin_unlock_irqrestore(&hp->port.lock, flags);
42230 hvc_kick();
42231 return 0;
42232@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
42233
42234 spin_lock_irqsave(&hp->port.lock, flags);
42235
42236- if (--hp->port.count == 0) {
42237+ if (atomic_dec_return(&hp->port.count) == 0) {
42238 spin_unlock_irqrestore(&hp->port.lock, flags);
42239 /* We are done with the tty pointer now. */
42240 tty_port_tty_set(&hp->port, NULL);
42241@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
42242 */
42243 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
42244 } else {
42245- if (hp->port.count < 0)
42246+ if (atomic_read(&hp->port.count) < 0)
42247 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
42248- hp->vtermno, hp->port.count);
42249+ hp->vtermno, atomic_read(&hp->port.count));
42250 spin_unlock_irqrestore(&hp->port.lock, flags);
42251 }
42252 }
42253@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
42254 * open->hangup case this can be called after the final close so prevent
42255 * that from happening for now.
42256 */
42257- if (hp->port.count <= 0) {
42258+ if (atomic_read(&hp->port.count) <= 0) {
42259 spin_unlock_irqrestore(&hp->port.lock, flags);
42260 return;
42261 }
42262
42263- hp->port.count = 0;
42264+ atomic_set(&hp->port.count, 0);
42265 spin_unlock_irqrestore(&hp->port.lock, flags);
42266 tty_port_tty_set(&hp->port, NULL);
42267
42268@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
42269 return -EPIPE;
42270
42271 /* FIXME what's this (unprotected) check for? */
42272- if (hp->port.count <= 0)
42273+ if (atomic_read(&hp->port.count) <= 0)
42274 return -EIO;
42275
42276 spin_lock_irqsave(&hp->lock, flags);
42277diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
42278index 8776357..b2d4afd 100644
42279--- a/drivers/tty/hvc/hvcs.c
42280+++ b/drivers/tty/hvc/hvcs.c
42281@@ -83,6 +83,7 @@
42282 #include <asm/hvcserver.h>
42283 #include <asm/uaccess.h>
42284 #include <asm/vio.h>
42285+#include <asm/local.h>
42286
42287 /*
42288 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
42289@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
42290
42291 spin_lock_irqsave(&hvcsd->lock, flags);
42292
42293- if (hvcsd->port.count > 0) {
42294+ if (atomic_read(&hvcsd->port.count) > 0) {
42295 spin_unlock_irqrestore(&hvcsd->lock, flags);
42296 printk(KERN_INFO "HVCS: vterm state unchanged. "
42297 "The hvcs device node is still in use.\n");
42298@@ -1132,7 +1133,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
42299 }
42300 }
42301
42302- hvcsd->port.count = 0;
42303+ atomic_set(&hvcsd->port.count, 0);
42304 hvcsd->port.tty = tty;
42305 tty->driver_data = hvcsd;
42306
42307@@ -1185,7 +1186,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
42308 unsigned long flags;
42309
42310 spin_lock_irqsave(&hvcsd->lock, flags);
42311- hvcsd->port.count++;
42312+ atomic_inc(&hvcsd->port.count);
42313 hvcsd->todo_mask |= HVCS_SCHED_READ;
42314 spin_unlock_irqrestore(&hvcsd->lock, flags);
42315
42316@@ -1221,7 +1222,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
42317 hvcsd = tty->driver_data;
42318
42319 spin_lock_irqsave(&hvcsd->lock, flags);
42320- if (--hvcsd->port.count == 0) {
42321+ if (atomic_dec_and_test(&hvcsd->port.count)) {
42322
42323 vio_disable_interrupts(hvcsd->vdev);
42324
42325@@ -1246,10 +1247,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
42326
42327 free_irq(irq, hvcsd);
42328 return;
42329- } else if (hvcsd->port.count < 0) {
42330+ } else if (atomic_read(&hvcsd->port.count) < 0) {
42331 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
42332 " is missmanaged.\n",
42333- hvcsd->vdev->unit_address, hvcsd->port.count);
42334+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
42335 }
42336
42337 spin_unlock_irqrestore(&hvcsd->lock, flags);
42338@@ -1271,7 +1272,7 @@ static void hvcs_hangup(struct tty_struct * tty)
42339
42340 spin_lock_irqsave(&hvcsd->lock, flags);
42341 /* Preserve this so that we know how many kref refs to put */
42342- temp_open_count = hvcsd->port.count;
42343+ temp_open_count = atomic_read(&hvcsd->port.count);
42344
42345 /*
42346 * Don't kref put inside the spinlock because the destruction
42347@@ -1286,7 +1287,7 @@ static void hvcs_hangup(struct tty_struct * tty)
42348 tty->driver_data = NULL;
42349 hvcsd->port.tty = NULL;
42350
42351- hvcsd->port.count = 0;
42352+ atomic_set(&hvcsd->port.count, 0);
42353
42354 /* This will drop any buffered data on the floor which is OK in a hangup
42355 * scenario. */
42356@@ -1357,7 +1358,7 @@ static int hvcs_write(struct tty_struct *tty,
42357 * the middle of a write operation? This is a crummy place to do this
42358 * but we want to keep it all in the spinlock.
42359 */
42360- if (hvcsd->port.count <= 0) {
42361+ if (atomic_read(&hvcsd->port.count) <= 0) {
42362 spin_unlock_irqrestore(&hvcsd->lock, flags);
42363 return -ENODEV;
42364 }
42365@@ -1431,7 +1432,7 @@ static int hvcs_write_room(struct tty_struct *tty)
42366 {
42367 struct hvcs_struct *hvcsd = tty->driver_data;
42368
42369- if (!hvcsd || hvcsd->port.count <= 0)
42370+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
42371 return 0;
42372
42373 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
42374diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
42375index 2cde13d..645d78f 100644
42376--- a/drivers/tty/ipwireless/tty.c
42377+++ b/drivers/tty/ipwireless/tty.c
42378@@ -29,6 +29,7 @@
42379 #include <linux/tty_driver.h>
42380 #include <linux/tty_flip.h>
42381 #include <linux/uaccess.h>
42382+#include <asm/local.h>
42383
42384 #include "tty.h"
42385 #include "network.h"
42386@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
42387 mutex_unlock(&tty->ipw_tty_mutex);
42388 return -ENODEV;
42389 }
42390- if (tty->port.count == 0)
42391+ if (atomic_read(&tty->port.count) == 0)
42392 tty->tx_bytes_queued = 0;
42393
42394- tty->port.count++;
42395+ atomic_inc(&tty->port.count);
42396
42397 tty->port.tty = linux_tty;
42398 linux_tty->driver_data = tty;
42399@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
42400
42401 static void do_ipw_close(struct ipw_tty *tty)
42402 {
42403- tty->port.count--;
42404-
42405- if (tty->port.count == 0) {
42406+ if (atomic_dec_return(&tty->port.count) == 0) {
42407 struct tty_struct *linux_tty = tty->port.tty;
42408
42409 if (linux_tty != NULL) {
42410@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
42411 return;
42412
42413 mutex_lock(&tty->ipw_tty_mutex);
42414- if (tty->port.count == 0) {
42415+ if (atomic_read(&tty->port.count) == 0) {
42416 mutex_unlock(&tty->ipw_tty_mutex);
42417 return;
42418 }
42419@@ -170,7 +169,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
42420 return;
42421 }
42422
42423- if (!tty->port.count) {
42424+ if (!atomic_read(&tty->port.count)) {
42425 mutex_unlock(&tty->ipw_tty_mutex);
42426 return;
42427 }
42428@@ -212,7 +211,7 @@ static int ipw_write(struct tty_struct *linux_tty,
42429 return -ENODEV;
42430
42431 mutex_lock(&tty->ipw_tty_mutex);
42432- if (!tty->port.count) {
42433+ if (!atomic_read(&tty->port.count)) {
42434 mutex_unlock(&tty->ipw_tty_mutex);
42435 return -EINVAL;
42436 }
42437@@ -252,7 +251,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
42438 if (!tty)
42439 return -ENODEV;
42440
42441- if (!tty->port.count)
42442+ if (!atomic_read(&tty->port.count))
42443 return -EINVAL;
42444
42445 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
42446@@ -294,7 +293,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
42447 if (!tty)
42448 return 0;
42449
42450- if (!tty->port.count)
42451+ if (!atomic_read(&tty->port.count))
42452 return 0;
42453
42454 return tty->tx_bytes_queued;
42455@@ -375,7 +374,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
42456 if (!tty)
42457 return -ENODEV;
42458
42459- if (!tty->port.count)
42460+ if (!atomic_read(&tty->port.count))
42461 return -EINVAL;
42462
42463 return get_control_lines(tty);
42464@@ -391,7 +390,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
42465 if (!tty)
42466 return -ENODEV;
42467
42468- if (!tty->port.count)
42469+ if (!atomic_read(&tty->port.count))
42470 return -EINVAL;
42471
42472 return set_control_lines(tty, set, clear);
42473@@ -405,7 +404,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
42474 if (!tty)
42475 return -ENODEV;
42476
42477- if (!tty->port.count)
42478+ if (!atomic_read(&tty->port.count))
42479 return -EINVAL;
42480
42481 /* FIXME: Exactly how is the tty object locked here .. */
42482@@ -561,7 +560,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
42483 * are gone */
42484 mutex_lock(&ttyj->ipw_tty_mutex);
42485 }
42486- while (ttyj->port.count)
42487+ while (atomic_read(&ttyj->port.count))
42488 do_ipw_close(ttyj);
42489 ipwireless_disassociate_network_ttys(network,
42490 ttyj->channel_idx);
42491diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
42492index f9d2850..b006f04 100644
42493--- a/drivers/tty/moxa.c
42494+++ b/drivers/tty/moxa.c
42495@@ -1193,7 +1193,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
42496 }
42497
42498 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
42499- ch->port.count++;
42500+ atomic_inc(&ch->port.count);
42501 tty->driver_data = ch;
42502 tty_port_tty_set(&ch->port, tty);
42503 mutex_lock(&ch->port.mutex);
42504diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
42505index bfd6771..e0d93c4 100644
42506--- a/drivers/tty/n_gsm.c
42507+++ b/drivers/tty/n_gsm.c
42508@@ -1636,7 +1636,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
42509 spin_lock_init(&dlci->lock);
42510 mutex_init(&dlci->mutex);
42511 dlci->fifo = &dlci->_fifo;
42512- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
42513+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
42514 kfree(dlci);
42515 return NULL;
42516 }
42517@@ -2936,7 +2936,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
42518 struct gsm_dlci *dlci = tty->driver_data;
42519 struct tty_port *port = &dlci->port;
42520
42521- port->count++;
42522+ atomic_inc(&port->count);
42523 dlci_get(dlci);
42524 dlci_get(dlci->gsm->dlci[0]);
42525 mux_get(dlci->gsm);
42526diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
42527index 19083ef..6e34e97 100644
42528--- a/drivers/tty/n_tty.c
42529+++ b/drivers/tty/n_tty.c
42530@@ -2196,6 +2196,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
42531 {
42532 *ops = tty_ldisc_N_TTY;
42533 ops->owner = NULL;
42534- ops->refcount = ops->flags = 0;
42535+ atomic_set(&ops->refcount, 0);
42536+ ops->flags = 0;
42537 }
42538 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
42539diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
42540index 79ff3a5..1fe9399 100644
42541--- a/drivers/tty/pty.c
42542+++ b/drivers/tty/pty.c
42543@@ -791,8 +791,10 @@ static void __init unix98_pty_init(void)
42544 panic("Couldn't register Unix98 pts driver");
42545
42546 /* Now create the /dev/ptmx special device */
42547+ pax_open_kernel();
42548 tty_default_fops(&ptmx_fops);
42549- ptmx_fops.open = ptmx_open;
42550+ *(void **)&ptmx_fops.open = ptmx_open;
42551+ pax_close_kernel();
42552
42553 cdev_init(&ptmx_cdev, &ptmx_fops);
42554 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
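
pax_open_kernel()/pax_close_kernel() bracket the one store that must land in an otherwise read-only structure, and the *(void **)& cast sidesteps the constify plugin's type check for that single assignment. A userspace analogue with mprotect(2), purely illustrative (a page-backed object stands in for .rodata):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

struct fops { int (*open)(void); };

static int ptmx_open_stub(void) { return 42; }

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);
	struct fops *fops;

	/* Model a normally read-only ops table with its own page. */
	fops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
	            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (fops == MAP_FAILED)
		return 1;
	memset(fops, 0, sizeof(*fops));
	mprotect(fops, pagesz, PROT_READ);              /* baseline: RO */

	mprotect(fops, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
	fops->open = ptmx_open_stub;                    /* the one patched slot */
	mprotect(fops, pagesz, PROT_READ);              /* pax_close_kernel() */

	printf("open() -> %d\n", fops->open());
	return 0;
}
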
42555diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
42556index e42009a..566a036 100644
42557--- a/drivers/tty/rocket.c
42558+++ b/drivers/tty/rocket.c
42559@@ -925,7 +925,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
42560 tty->driver_data = info;
42561 tty_port_tty_set(port, tty);
42562
42563- if (port->count++ == 0) {
42564+ if (atomic_inc_return(&port->count) == 1) {
42565 atomic_inc(&rp_num_ports_open);
42566
42567 #ifdef ROCKET_DEBUG_OPEN
42568@@ -934,7 +934,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
42569 #endif
42570 }
42571 #ifdef ROCKET_DEBUG_OPEN
42572- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
42573+	printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
42574 #endif
42575
42576 /*
42577@@ -1529,7 +1529,7 @@ static void rp_hangup(struct tty_struct *tty)
42578 spin_unlock_irqrestore(&info->port.lock, flags);
42579 return;
42580 }
42581- if (info->port.count)
42582+ if (atomic_read(&info->port.count))
42583 atomic_dec(&rp_num_ports_open);
42584 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
42585 spin_unlock_irqrestore(&info->port.lock, flags);
42586diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
42587index 1002054..dd644a8 100644
42588--- a/drivers/tty/serial/kgdboc.c
42589+++ b/drivers/tty/serial/kgdboc.c
42590@@ -24,8 +24,9 @@
42591 #define MAX_CONFIG_LEN 40
42592
42593 static struct kgdb_io kgdboc_io_ops;
42594+static struct kgdb_io kgdboc_io_ops_console;
42595
42596-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
42597+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
42598 static int configured = -1;
42599
42600 static char config[MAX_CONFIG_LEN];
42601@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
42602 kgdboc_unregister_kbd();
42603 if (configured == 1)
42604 kgdb_unregister_io_module(&kgdboc_io_ops);
42605+ else if (configured == 2)
42606+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
42607 }
42608
42609 static int configure_kgdboc(void)
42610@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
42611 int err;
42612 char *cptr = config;
42613 struct console *cons;
42614+ int is_console = 0;
42615
42616 err = kgdboc_option_setup(config);
42617 if (err || !strlen(config) || isspace(config[0]))
42618 goto noconfig;
42619
42620 err = -ENODEV;
42621- kgdboc_io_ops.is_console = 0;
42622 kgdb_tty_driver = NULL;
42623
42624 kgdboc_use_kms = 0;
42625@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
42626 int idx;
42627 if (cons->device && cons->device(cons, &idx) == p &&
42628 idx == tty_line) {
42629- kgdboc_io_ops.is_console = 1;
42630+ is_console = 1;
42631 break;
42632 }
42633 cons = cons->next;
42634@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
42635 kgdb_tty_line = tty_line;
42636
42637 do_register:
42638- err = kgdb_register_io_module(&kgdboc_io_ops);
42639+ if (is_console) {
42640+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
42641+ configured = 2;
42642+ } else {
42643+ err = kgdb_register_io_module(&kgdboc_io_ops);
42644+ configured = 1;
42645+ }
42646 if (err)
42647 goto noconfig;
42648
42649@@ -205,8 +214,6 @@ do_register:
42650 if (err)
42651 goto nmi_con_failed;
42652
42653- configured = 1;
42654-
42655 return 0;
42656
42657 nmi_con_failed:
42658@@ -223,7 +230,7 @@ noconfig:
42659 static int __init init_kgdboc(void)
42660 {
42661 /* Already configured? */
42662- if (configured == 1)
42663+ if (configured >= 1)
42664 return 0;
42665
42666 return configure_kgdboc();
42667@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
42668 if (config[len - 1] == '\n')
42669 config[len - 1] = '\0';
42670
42671- if (configured == 1)
42672+ if (configured >= 1)
42673 cleanup_kgdboc();
42674
42675 /* Go and configure with the new params. */
42676@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
42677 .post_exception = kgdboc_post_exp_handler,
42678 };
42679
42680+static struct kgdb_io kgdboc_io_ops_console = {
42681+ .name = "kgdboc",
42682+ .read_char = kgdboc_get_char,
42683+ .write_char = kgdboc_put_char,
42684+ .pre_exception = kgdboc_pre_exp_handler,
42685+ .post_exception = kgdboc_post_exp_handler,
42686+ .is_console = 1
42687+};
42688+
42689 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
42690 /* This is only available if kgdboc is a built in for early debugging */
42691 static int __init kgdboc_early_init(char *opt)
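
Rather than flipping kgdboc_io_ops.is_console at runtime, which would force the whole ops structure to stay writable, the rework registers one of two fixed tables and records the choice in `configured` (1 vs 2) so cleanup unregisters the right one. The "select a table, don't patch one" shape in miniature, names hypothetical:

#include <stdio.h>

struct io_ops {
	const char *name;
	int is_console;
};

static const struct io_ops plain_ops   = { "kgdboc", 0 };
static const struct io_ops console_ops = { "kgdboc", 1 };

static int configured; /* 0 = none, 1 = plain, 2 = console */

static void configure(int is_console)
{
	/* Pick the matching immutable table; nothing is written to it. */
	const struct io_ops *ops = is_console ? &console_ops : &plain_ops;

	configured = is_console ? 2 : 1;
	printf("registered %s (is_console=%d, configured=%d)\n",
	       ops->name, ops->is_console, configured);
}

int main(void)
{
	configure(0);
	configure(1);
	return 0;
}
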
42692diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
42693index e514b3a..c73d614 100644
42694--- a/drivers/tty/serial/samsung.c
42695+++ b/drivers/tty/serial/samsung.c
42696@@ -453,11 +453,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
42697 }
42698 }
42699
42700+static int s3c64xx_serial_startup(struct uart_port *port);
42701 static int s3c24xx_serial_startup(struct uart_port *port)
42702 {
42703 struct s3c24xx_uart_port *ourport = to_ourport(port);
42704 int ret;
42705
42706+ /* Startup sequence is different for s3c64xx and higher SoC's */
42707+ if (s3c24xx_serial_has_interrupt_mask(port))
42708+ return s3c64xx_serial_startup(port);
42709+
42710 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
42711 port->mapbase, port->membase);
42712
42713@@ -1122,10 +1127,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
42714 /* setup info for port */
42715 port->dev = &platdev->dev;
42716
42717- /* Startup sequence is different for s3c64xx and higher SoC's */
42718- if (s3c24xx_serial_has_interrupt_mask(port))
42719- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
42720-
42721 port->uartclk = 1;
42722
42723 if (cfg->uart_flags & UPF_CONS_FLOW) {
42724diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
42725index 2c7230a..2104f16 100644
42726--- a/drivers/tty/serial/serial_core.c
42727+++ b/drivers/tty/serial/serial_core.c
42728@@ -1455,7 +1455,7 @@ static void uart_hangup(struct tty_struct *tty)
42729 uart_flush_buffer(tty);
42730 uart_shutdown(tty, state);
42731 spin_lock_irqsave(&port->lock, flags);
42732- port->count = 0;
42733+ atomic_set(&port->count, 0);
42734 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
42735 spin_unlock_irqrestore(&port->lock, flags);
42736 tty_port_tty_set(port, NULL);
42737@@ -1551,7 +1551,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
42738 goto end;
42739 }
42740
42741- port->count++;
42742+ atomic_inc(&port->count);
42743 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
42744 retval = -ENXIO;
42745 goto err_dec_count;
42746@@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
42747 /*
42748 * Make sure the device is in D0 state.
42749 */
42750- if (port->count == 1)
42751+ if (atomic_read(&port->count) == 1)
42752 uart_change_pm(state, 0);
42753
42754 /*
42755@@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
42756 end:
42757 return retval;
42758 err_dec_count:
42759- port->count--;
42760+	atomic_dec(&port->count);
42761 mutex_unlock(&port->mutex);
42762 goto end;
42763 }
42764diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
42765index 9e071f6..f30ae69 100644
42766--- a/drivers/tty/synclink.c
42767+++ b/drivers/tty/synclink.c
42768@@ -3095,7 +3095,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
42769
42770 if (debug_level >= DEBUG_LEVEL_INFO)
42771 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
42772- __FILE__,__LINE__, info->device_name, info->port.count);
42773+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
42774
42775 if (tty_port_close_start(&info->port, tty, filp) == 0)
42776 goto cleanup;
42777@@ -3113,7 +3113,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
42778 cleanup:
42779 if (debug_level >= DEBUG_LEVEL_INFO)
42780 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
42781- tty->driver->name, info->port.count);
42782+ tty->driver->name, atomic_read(&info->port.count));
42783
42784 } /* end of mgsl_close() */
42785
42786@@ -3212,8 +3212,8 @@ static void mgsl_hangup(struct tty_struct *tty)
42787
42788 mgsl_flush_buffer(tty);
42789 shutdown(info);
42790-
42791- info->port.count = 0;
42792+
42793+ atomic_set(&info->port.count, 0);
42794 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
42795 info->port.tty = NULL;
42796
42797@@ -3302,12 +3302,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
42798
42799 if (debug_level >= DEBUG_LEVEL_INFO)
42800 printk("%s(%d):block_til_ready before block on %s count=%d\n",
42801- __FILE__,__LINE__, tty->driver->name, port->count );
42802+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
42803
42804 spin_lock_irqsave(&info->irq_spinlock, flags);
42805 if (!tty_hung_up_p(filp)) {
42806 extra_count = true;
42807- port->count--;
42808+ atomic_dec(&port->count);
42809 }
42810 spin_unlock_irqrestore(&info->irq_spinlock, flags);
42811 port->blocked_open++;
42812@@ -3336,7 +3336,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
42813
42814 if (debug_level >= DEBUG_LEVEL_INFO)
42815 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
42816- __FILE__,__LINE__, tty->driver->name, port->count );
42817+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
42818
42819 tty_unlock(tty);
42820 schedule();
42821@@ -3348,12 +3348,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
42822
42823 /* FIXME: Racy on hangup during close wait */
42824 if (extra_count)
42825- port->count++;
42826+ atomic_inc(&port->count);
42827 port->blocked_open--;
42828
42829 if (debug_level >= DEBUG_LEVEL_INFO)
42830 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
42831- __FILE__,__LINE__, tty->driver->name, port->count );
42832+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
42833
42834 if (!retval)
42835 port->flags |= ASYNC_NORMAL_ACTIVE;
42836@@ -3405,7 +3405,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
42837
42838 if (debug_level >= DEBUG_LEVEL_INFO)
42839 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
42840- __FILE__,__LINE__,tty->driver->name, info->port.count);
42841+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
42842
42843 /* If port is closing, signal caller to try again */
42844 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
42845@@ -3424,10 +3424,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
42846 spin_unlock_irqrestore(&info->netlock, flags);
42847 goto cleanup;
42848 }
42849- info->port.count++;
42850+ atomic_inc(&info->port.count);
42851 spin_unlock_irqrestore(&info->netlock, flags);
42852
42853- if (info->port.count == 1) {
42854+ if (atomic_read(&info->port.count) == 1) {
42855 /* 1st open on this device, init hardware */
42856 retval = startup(info);
42857 if (retval < 0)
42858@@ -3451,8 +3451,8 @@ cleanup:
42859 if (retval) {
42860 if (tty->count == 1)
42861 info->port.tty = NULL; /* tty layer will release tty struct */
42862- if(info->port.count)
42863- info->port.count--;
42864+ if (atomic_read(&info->port.count))
42865+ atomic_dec(&info->port.count);
42866 }
42867
42868 return retval;
42869@@ -7662,7 +7662,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
42870 unsigned short new_crctype;
42871
42872 /* return error if TTY interface open */
42873- if (info->port.count)
42874+ if (atomic_read(&info->port.count))
42875 return -EBUSY;
42876
42877 switch (encoding)
42878@@ -7757,7 +7757,7 @@ static int hdlcdev_open(struct net_device *dev)
42879
42880 /* arbitrate between network and tty opens */
42881 spin_lock_irqsave(&info->netlock, flags);
42882- if (info->port.count != 0 || info->netcount != 0) {
42883+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
42884 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
42885 spin_unlock_irqrestore(&info->netlock, flags);
42886 return -EBUSY;
42887@@ -7843,7 +7843,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
42888 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
42889
42890 /* return error if TTY interface open */
42891- if (info->port.count)
42892+ if (atomic_read(&info->port.count))
42893 return -EBUSY;
42894
42895 if (cmd != SIOCWANDEV)
42896diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
42897index aba1e59..877ac33 100644
42898--- a/drivers/tty/synclink_gt.c
42899+++ b/drivers/tty/synclink_gt.c
42900@@ -671,7 +671,7 @@ static int open(struct tty_struct *tty, struct file *filp)
42901 tty->driver_data = info;
42902 info->port.tty = tty;
42903
42904- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
42905+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
42906
42907 /* If port is closing, signal caller to try again */
42908 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
42909@@ -692,10 +692,10 @@ static int open(struct tty_struct *tty, struct file *filp)
42910 mutex_unlock(&info->port.mutex);
42911 goto cleanup;
42912 }
42913- info->port.count++;
42914+ atomic_inc(&info->port.count);
42915 spin_unlock_irqrestore(&info->netlock, flags);
42916
42917- if (info->port.count == 1) {
42918+ if (atomic_read(&info->port.count) == 1) {
42919 /* 1st open on this device, init hardware */
42920 retval = startup(info);
42921 if (retval < 0) {
42922@@ -716,8 +716,8 @@ cleanup:
42923 if (retval) {
42924 if (tty->count == 1)
42925 info->port.tty = NULL; /* tty layer will release tty struct */
42926- if(info->port.count)
42927- info->port.count--;
42928+ if(atomic_read(&info->port.count))
42929+ atomic_dec(&info->port.count);
42930 }
42931
42932 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
42933@@ -730,7 +730,7 @@ static void close(struct tty_struct *tty, struct file *filp)
42934
42935 if (sanity_check(info, tty->name, "close"))
42936 return;
42937- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
42938+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
42939
42940 if (tty_port_close_start(&info->port, tty, filp) == 0)
42941 goto cleanup;
42942@@ -747,7 +747,7 @@ static void close(struct tty_struct *tty, struct file *filp)
42943 tty_port_close_end(&info->port, tty);
42944 info->port.tty = NULL;
42945 cleanup:
42946- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
42947+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
42948 }
42949
42950 static void hangup(struct tty_struct *tty)
42951@@ -765,7 +765,7 @@ static void hangup(struct tty_struct *tty)
42952 shutdown(info);
42953
42954 spin_lock_irqsave(&info->port.lock, flags);
42955- info->port.count = 0;
42956+ atomic_set(&info->port.count, 0);
42957 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
42958 info->port.tty = NULL;
42959 spin_unlock_irqrestore(&info->port.lock, flags);
42960@@ -1450,7 +1450,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
42961 unsigned short new_crctype;
42962
42963 /* return error if TTY interface open */
42964- if (info->port.count)
42965+ if (atomic_read(&info->port.count))
42966 return -EBUSY;
42967
42968 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
42969@@ -1545,7 +1545,7 @@ static int hdlcdev_open(struct net_device *dev)
42970
42971 /* arbitrate between network and tty opens */
42972 spin_lock_irqsave(&info->netlock, flags);
42973- if (info->port.count != 0 || info->netcount != 0) {
42974+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
42975 DBGINFO(("%s hdlc_open busy\n", dev->name));
42976 spin_unlock_irqrestore(&info->netlock, flags);
42977 return -EBUSY;
42978@@ -1630,7 +1630,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
42979 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
42980
42981 /* return error if TTY interface open */
42982- if (info->port.count)
42983+ if (atomic_read(&info->port.count))
42984 return -EBUSY;
42985
42986 if (cmd != SIOCWANDEV)
42987@@ -2419,7 +2419,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
42988 if (port == NULL)
42989 continue;
42990 spin_lock(&port->lock);
42991- if ((port->port.count || port->netcount) &&
42992+ if ((atomic_read(&port->port.count) || port->netcount) &&
42993 port->pending_bh && !port->bh_running &&
42994 !port->bh_requested) {
42995 DBGISR(("%s bh queued\n", port->device_name));
42996@@ -3308,7 +3308,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
42997 spin_lock_irqsave(&info->lock, flags);
42998 if (!tty_hung_up_p(filp)) {
42999 extra_count = true;
43000- port->count--;
43001+ atomic_dec(&port->count);
43002 }
43003 spin_unlock_irqrestore(&info->lock, flags);
43004 port->blocked_open++;
43005@@ -3345,7 +3345,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43006 remove_wait_queue(&port->open_wait, &wait);
43007
43008 if (extra_count)
43009- port->count++;
43010+ atomic_inc(&port->count);
43011 port->blocked_open--;
43012
43013 if (!retval)
43014diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
43015index fd43fb6..34704ad 100644
43016--- a/drivers/tty/synclinkmp.c
43017+++ b/drivers/tty/synclinkmp.c
43018@@ -751,7 +751,7 @@ static int open(struct tty_struct *tty, struct file *filp)
43019
43020 if (debug_level >= DEBUG_LEVEL_INFO)
43021 printk("%s(%d):%s open(), old ref count = %d\n",
43022- __FILE__,__LINE__,tty->driver->name, info->port.count);
43023+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
43024
43025 /* If port is closing, signal caller to try again */
43026 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43027@@ -770,10 +770,10 @@ static int open(struct tty_struct *tty, struct file *filp)
43028 spin_unlock_irqrestore(&info->netlock, flags);
43029 goto cleanup;
43030 }
43031- info->port.count++;
43032+ atomic_inc(&info->port.count);
43033 spin_unlock_irqrestore(&info->netlock, flags);
43034
43035- if (info->port.count == 1) {
43036+ if (atomic_read(&info->port.count) == 1) {
43037 /* 1st open on this device, init hardware */
43038 retval = startup(info);
43039 if (retval < 0)
43040@@ -797,8 +797,8 @@ cleanup:
43041 if (retval) {
43042 if (tty->count == 1)
43043 info->port.tty = NULL; /* tty layer will release tty struct */
43044- if(info->port.count)
43045- info->port.count--;
43046+ if(atomic_read(&info->port.count))
43047+ atomic_dec(&info->port.count);
43048 }
43049
43050 return retval;
43051@@ -816,7 +816,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43052
43053 if (debug_level >= DEBUG_LEVEL_INFO)
43054 printk("%s(%d):%s close() entry, count=%d\n",
43055- __FILE__,__LINE__, info->device_name, info->port.count);
43056+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
43057
43058 if (tty_port_close_start(&info->port, tty, filp) == 0)
43059 goto cleanup;
43060@@ -835,7 +835,7 @@ static void close(struct tty_struct *tty, struct file *filp)
43061 cleanup:
43062 if (debug_level >= DEBUG_LEVEL_INFO)
43063 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
43064- tty->driver->name, info->port.count);
43065+ tty->driver->name, atomic_read(&info->port.count));
43066 }
43067
43068 /* Called by tty_hangup() when a hangup is signaled.
43069@@ -858,7 +858,7 @@ static void hangup(struct tty_struct *tty)
43070 shutdown(info);
43071
43072 spin_lock_irqsave(&info->port.lock, flags);
43073- info->port.count = 0;
43074+ atomic_set(&info->port.count, 0);
43075 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43076 info->port.tty = NULL;
43077 spin_unlock_irqrestore(&info->port.lock, flags);
43078@@ -1566,7 +1566,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
43079 unsigned short new_crctype;
43080
43081 /* return error if TTY interface open */
43082- if (info->port.count)
43083+ if (atomic_read(&info->port.count))
43084 return -EBUSY;
43085
43086 switch (encoding)
43087@@ -1661,7 +1661,7 @@ static int hdlcdev_open(struct net_device *dev)
43088
43089 /* arbitrate between network and tty opens */
43090 spin_lock_irqsave(&info->netlock, flags);
43091- if (info->port.count != 0 || info->netcount != 0) {
43092+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
43093 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
43094 spin_unlock_irqrestore(&info->netlock, flags);
43095 return -EBUSY;
43096@@ -1747,7 +1747,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43097 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
43098
43099 /* return error if TTY interface open */
43100- if (info->port.count)
43101+ if (atomic_read(&info->port.count))
43102 return -EBUSY;
43103
43104 if (cmd != SIOCWANDEV)
43105@@ -2632,7 +2632,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
43106 * do not request bottom half processing if the
43107 * device is not open in a normal mode.
43108 */
43109- if ( port && (port->port.count || port->netcount) &&
43110+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
43111 port->pending_bh && !port->bh_running &&
43112 !port->bh_requested ) {
43113 if ( debug_level >= DEBUG_LEVEL_ISR )
43114@@ -3330,12 +3330,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43115
43116 if (debug_level >= DEBUG_LEVEL_INFO)
43117 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
43118- __FILE__,__LINE__, tty->driver->name, port->count );
43119+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43120
43121 spin_lock_irqsave(&info->lock, flags);
43122 if (!tty_hung_up_p(filp)) {
43123 extra_count = true;
43124- port->count--;
43125+ atomic_dec(&port->count);
43126 }
43127 spin_unlock_irqrestore(&info->lock, flags);
43128 port->blocked_open++;
43129@@ -3364,7 +3364,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43130
43131 if (debug_level >= DEBUG_LEVEL_INFO)
43132 printk("%s(%d):%s block_til_ready() count=%d\n",
43133- __FILE__,__LINE__, tty->driver->name, port->count );
43134+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43135
43136 tty_unlock(tty);
43137 schedule();
43138@@ -3375,12 +3375,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
43139 remove_wait_queue(&port->open_wait, &wait);
43140
43141 if (extra_count)
43142- port->count++;
43143+ atomic_inc(&port->count);
43144 port->blocked_open--;
43145
43146 if (debug_level >= DEBUG_LEVEL_INFO)
43147 printk("%s(%d):%s block_til_ready() after, count=%d\n",
43148- __FILE__,__LINE__, tty->driver->name, port->count );
43149+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43150
43151 if (!retval)
43152 port->flags |= ASYNC_NORMAL_ACTIVE;
43153diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
43154index b3c4a25..723916f 100644
43155--- a/drivers/tty/sysrq.c
43156+++ b/drivers/tty/sysrq.c
43157@@ -867,7 +867,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
43158 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
43159 size_t count, loff_t *ppos)
43160 {
43161- if (count) {
43162+ if (count && capable(CAP_SYS_ADMIN)) {
43163 char c;
43164
43165 if (get_user(c, buf))
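
This hunk re-checks CAP_SYS_ADMIN on every write to /proc/sysrq-trigger, so a descriptor opened while privileged stops working once the caller drops the capability; unprivileged writes are swallowed rather than rejected. A condensed sketch of check-at-use rather than check-at-open (capable_cap_sys_admin stands in for the kernel's capable()):

#include <stdio.h>
#include <stddef.h>

static int capable_cap_sys_admin; /* stand-in for capable(CAP_SYS_ADMIN) */

static long write_sysrq_trigger(const char *buf, size_t count)
{
	/* Evaluate privilege at every write, not once at open. */
	if (count && capable_cap_sys_admin)
		printf("sysrq: handling '%c'\n", buf[0]);
	return (long)count; /* unprivileged writes are silently ignored */
}

int main(void)
{
	capable_cap_sys_admin = 1;
	write_sysrq_trigger("b", 1);
	capable_cap_sys_admin = 0; /* model a privilege drop */
	write_sysrq_trigger("b", 1);
	return 0;
}
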
43166diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
43167index da9fde8..c07975f 100644
43168--- a/drivers/tty/tty_io.c
43169+++ b/drivers/tty/tty_io.c
43170@@ -3391,7 +3391,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
43171
43172 void tty_default_fops(struct file_operations *fops)
43173 {
43174- *fops = tty_fops;
43175+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
43176 }
43177
43178 /*
43179diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
43180index c578229..45aa9ee 100644
43181--- a/drivers/tty/tty_ldisc.c
43182+++ b/drivers/tty/tty_ldisc.c
43183@@ -56,7 +56,7 @@ static void put_ldisc(struct tty_ldisc *ld)
43184 if (atomic_dec_and_test(&ld->users)) {
43185 struct tty_ldisc_ops *ldo = ld->ops;
43186
43187- ldo->refcount--;
43188+ atomic_dec(&ldo->refcount);
43189 module_put(ldo->owner);
43190 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
43191
43192@@ -91,7 +91,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
43193 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
43194 tty_ldiscs[disc] = new_ldisc;
43195 new_ldisc->num = disc;
43196- new_ldisc->refcount = 0;
43197+ atomic_set(&new_ldisc->refcount, 0);
43198 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
43199
43200 return ret;
43201@@ -119,7 +119,7 @@ int tty_unregister_ldisc(int disc)
43202 return -EINVAL;
43203
43204 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
43205- if (tty_ldiscs[disc]->refcount)
43206+ if (atomic_read(&tty_ldiscs[disc]->refcount))
43207 ret = -EBUSY;
43208 else
43209 tty_ldiscs[disc] = NULL;
43210@@ -140,7 +140,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
43211 if (ldops) {
43212 ret = ERR_PTR(-EAGAIN);
43213 if (try_module_get(ldops->owner)) {
43214- ldops->refcount++;
43215+ atomic_inc(&ldops->refcount);
43216 ret = ldops;
43217 }
43218 }
43219@@ -153,7 +153,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
43220 unsigned long flags;
43221
43222 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
43223- ldops->refcount--;
43224+ atomic_dec(&ldops->refcount);
43225 module_put(ldops->owner);
43226 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
43227 }
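
Here and in the tty_port hunks below, plain ++/-- on shared counters becomes atomic because a non-atomic read-modify-write can lose updates when two contexts race, and a lost reference means a premature free. The generic hazard is easy to reproduce in userspace with two threads (compile with -pthread; this illustrates the class of bug, not these exact kernel paths):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ITERS 1000000

static int plain_count;         /* like a bare refcount field */
static atomic_int atomic_count; /* like atomic_t after the patch */

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < ITERS; i++) {
		plain_count++;                      /* racy read-modify-write */
		atomic_fetch_add(&atomic_count, 1); /* indivisible */
	}
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pthread_create(&a, NULL, worker, NULL);
	pthread_create(&b, NULL, worker, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);

	/* plain_count typically falls short of the expected total;
	 * the atomic counter never does. */
	printf("plain: %d atomic: %d expected: %d\n",
	       plain_count, atomic_load(&atomic_count), 2 * ITERS);
	return 0;
}
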
43228diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
43229index b7ff59d..7c6105e 100644
43230--- a/drivers/tty/tty_port.c
43231+++ b/drivers/tty/tty_port.c
43232@@ -218,7 +218,7 @@ void tty_port_hangup(struct tty_port *port)
43233 unsigned long flags;
43234
43235 spin_lock_irqsave(&port->lock, flags);
43236- port->count = 0;
43237+ atomic_set(&port->count, 0);
43238 port->flags &= ~ASYNC_NORMAL_ACTIVE;
43239 if (port->tty) {
43240 set_bit(TTY_IO_ERROR, &port->tty->flags);
43241@@ -344,7 +344,7 @@ int tty_port_block_til_ready(struct tty_port *port,
43242 /* The port lock protects the port counts */
43243 spin_lock_irqsave(&port->lock, flags);
43244 if (!tty_hung_up_p(filp))
43245- port->count--;
43246+ atomic_dec(&port->count);
43247 port->blocked_open++;
43248 spin_unlock_irqrestore(&port->lock, flags);
43249
43250@@ -386,7 +386,7 @@ int tty_port_block_til_ready(struct tty_port *port,
43251 we must not mess that up further */
43252 spin_lock_irqsave(&port->lock, flags);
43253 if (!tty_hung_up_p(filp))
43254- port->count++;
43255+ atomic_inc(&port->count);
43256 port->blocked_open--;
43257 if (retval == 0)
43258 port->flags |= ASYNC_NORMAL_ACTIVE;
43259@@ -406,19 +406,19 @@ int tty_port_close_start(struct tty_port *port,
43260 return 0;
43261 }
43262
43263- if (tty->count == 1 && port->count != 1) {
43264+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
43265 printk(KERN_WARNING
43266 "tty_port_close_start: tty->count = 1 port count = %d.\n",
43267- port->count);
43268- port->count = 1;
43269+ atomic_read(&port->count));
43270+ atomic_set(&port->count, 1);
43271 }
43272- if (--port->count < 0) {
43273+ if (atomic_dec_return(&port->count) < 0) {
43274 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
43275- port->count);
43276- port->count = 0;
43277+ atomic_read(&port->count));
43278+ atomic_set(&port->count, 0);
43279 }
43280
43281- if (port->count) {
43282+ if (atomic_read(&port->count)) {
43283 spin_unlock_irqrestore(&port->lock, flags);
43284 if (port->ops->drop)
43285 port->ops->drop(port);
43286@@ -516,7 +516,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
43287 {
43288 spin_lock_irq(&port->lock);
43289 if (!tty_hung_up_p(filp))
43290- ++port->count;
43291+ atomic_inc(&port->count);
43292 spin_unlock_irq(&port->lock);
43293 tty_port_tty_set(port, tty);
43294
43295diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
43296index 681765b..d3ccdf2 100644
43297--- a/drivers/tty/vt/keyboard.c
43298+++ b/drivers/tty/vt/keyboard.c
43299@@ -660,6 +660,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
43300 kbd->kbdmode == VC_OFF) &&
43301 value != KVAL(K_SAK))
43302 return; /* SAK is allowed even in raw mode */
43303+
43304+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
43305+ {
43306+ void *func = fn_handler[value];
43307+ if (func == fn_show_state || func == fn_show_ptregs ||
43308+ func == fn_show_mem)
43309+ return;
43310+ }
43311+#endif
43312+
43313 fn_handler[value](vc);
43314 }
43315
43316@@ -1808,9 +1818,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
43317 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
43318 return -EFAULT;
43319
43320- if (!capable(CAP_SYS_TTY_CONFIG))
43321- perm = 0;
43322-
43323 switch (cmd) {
43324 case KDGKBENT:
43325 /* Ensure another thread doesn't free it under us */
43326@@ -1825,6 +1832,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
43327 spin_unlock_irqrestore(&kbd_event_lock, flags);
43328 return put_user(val, &user_kbe->kb_value);
43329 case KDSKBENT:
43330+ if (!capable(CAP_SYS_TTY_CONFIG))
43331+ perm = 0;
43332+
43333 if (!perm)
43334 return -EPERM;
43335 if (!i && v == K_NOSUCHMAP) {
43336@@ -1915,9 +1925,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
43337 int i, j, k;
43338 int ret;
43339
43340- if (!capable(CAP_SYS_TTY_CONFIG))
43341- perm = 0;
43342-
43343 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
43344 if (!kbs) {
43345 ret = -ENOMEM;
43346@@ -1951,6 +1958,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
43347 kfree(kbs);
43348 return ((p && *p) ? -EOVERFLOW : 0);
43349 case KDSKBSENT:
43350+ if (!capable(CAP_SYS_TTY_CONFIG))
43351+ perm = 0;
43352+
43353 if (!perm) {
43354 ret = -EPERM;
43355 goto reterr;
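
The keyboard.c hunks push the CAP_SYS_TTY_CONFIG test down from the top of the ioctl into the set cases (KDSKBENT/KDSKBSENT), so reading the keymap needs no special privilege while modifying it still does. Per-command permissioning in a switch, sketched with stand-in names:

#include <stdio.h>

enum { KDGKBENT, KDSKBENT }; /* get vs set, as in the ioctl */

static int capable_tty_config; /* stand-in for capable(CAP_SYS_TTY_CONFIG) */

static int do_kdsk_ioctl(int cmd, int perm)
{
	switch (cmd) {
	case KDGKBENT:
		return 0; /* reads need no extra privilege */
	case KDSKBENT:
		/* Demand the capability only where state is modified. */
		if (!capable_tty_config)
			perm = 0;
		if (!perm)
			return -1; /* models -EPERM */
		return 0;
	}
	return -1;
}

int main(void)
{
	printf("get, unprivileged: %d\n", do_kdsk_ioctl(KDGKBENT, 1));
	printf("set, unprivileged: %d\n", do_kdsk_ioctl(KDSKBENT, 1));
	capable_tty_config = 1;
	printf("set, privileged:   %d\n", do_kdsk_ioctl(KDSKBENT, 1));
	return 0;
}
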
43356diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
43357index 5110f36..8dc0a74 100644
43358--- a/drivers/uio/uio.c
43359+++ b/drivers/uio/uio.c
43360@@ -25,6 +25,7 @@
43361 #include <linux/kobject.h>
43362 #include <linux/cdev.h>
43363 #include <linux/uio_driver.h>
43364+#include <asm/local.h>
43365
43366 #define UIO_MAX_DEVICES (1U << MINORBITS)
43367
43368@@ -32,10 +33,10 @@ struct uio_device {
43369 struct module *owner;
43370 struct device *dev;
43371 int minor;
43372- atomic_t event;
43373+ atomic_unchecked_t event;
43374 struct fasync_struct *async_queue;
43375 wait_queue_head_t wait;
43376- int vma_count;
43377+ local_t vma_count;
43378 struct uio_info *info;
43379 struct kobject *map_dir;
43380 struct kobject *portio_dir;
43381@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
43382 struct device_attribute *attr, char *buf)
43383 {
43384 struct uio_device *idev = dev_get_drvdata(dev);
43385- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
43386+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
43387 }
43388
43389 static struct device_attribute uio_class_attributes[] = {
43390@@ -408,7 +409,7 @@ void uio_event_notify(struct uio_info *info)
43391 {
43392 struct uio_device *idev = info->uio_dev;
43393
43394- atomic_inc(&idev->event);
43395+ atomic_inc_unchecked(&idev->event);
43396 wake_up_interruptible(&idev->wait);
43397 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
43398 }
43399@@ -461,7 +462,7 @@ static int uio_open(struct inode *inode, struct file *filep)
43400 }
43401
43402 listener->dev = idev;
43403- listener->event_count = atomic_read(&idev->event);
43404+ listener->event_count = atomic_read_unchecked(&idev->event);
43405 filep->private_data = listener;
43406
43407 if (idev->info->open) {
43408@@ -512,7 +513,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
43409 return -EIO;
43410
43411 poll_wait(filep, &idev->wait, wait);
43412- if (listener->event_count != atomic_read(&idev->event))
43413+ if (listener->event_count != atomic_read_unchecked(&idev->event))
43414 return POLLIN | POLLRDNORM;
43415 return 0;
43416 }
43417@@ -537,7 +538,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
43418 do {
43419 set_current_state(TASK_INTERRUPTIBLE);
43420
43421- event_count = atomic_read(&idev->event);
43422+ event_count = atomic_read_unchecked(&idev->event);
43423 if (event_count != listener->event_count) {
43424 if (copy_to_user(buf, &event_count, count))
43425 retval = -EFAULT;
43426@@ -606,13 +607,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
43427 static void uio_vma_open(struct vm_area_struct *vma)
43428 {
43429 struct uio_device *idev = vma->vm_private_data;
43430- idev->vma_count++;
43431+ local_inc(&idev->vma_count);
43432 }
43433
43434 static void uio_vma_close(struct vm_area_struct *vma)
43435 {
43436 struct uio_device *idev = vma->vm_private_data;
43437- idev->vma_count--;
43438+ local_dec(&idev->vma_count);
43439 }
43440
43441 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
43442@@ -819,7 +820,7 @@ int __uio_register_device(struct module *owner,
43443 idev->owner = owner;
43444 idev->info = info;
43445 init_waitqueue_head(&idev->wait);
43446- atomic_set(&idev->event, 0);
43447+ atomic_set_unchecked(&idev->event, 0);
43448
43449 ret = uio_get_minor(idev);
43450 if (ret)
43451diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
43452index b7eb86a..36d28af 100644
43453--- a/drivers/usb/atm/cxacru.c
43454+++ b/drivers/usb/atm/cxacru.c
43455@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
43456 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
43457 if (ret < 2)
43458 return -EINVAL;
43459- if (index < 0 || index > 0x7f)
43460+ if (index > 0x7f)
43461 return -EINVAL;
43462 pos += tmp;
43463
43464diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
43465index 35f10bf..6a38a0b 100644
43466--- a/drivers/usb/atm/usbatm.c
43467+++ b/drivers/usb/atm/usbatm.c
43468@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43469 if (printk_ratelimit())
43470 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
43471 __func__, vpi, vci);
43472- atomic_inc(&vcc->stats->rx_err);
43473+ atomic_inc_unchecked(&vcc->stats->rx_err);
43474 return;
43475 }
43476
43477@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43478 if (length > ATM_MAX_AAL5_PDU) {
43479 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
43480 __func__, length, vcc);
43481- atomic_inc(&vcc->stats->rx_err);
43482+ atomic_inc_unchecked(&vcc->stats->rx_err);
43483 goto out;
43484 }
43485
43486@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43487 if (sarb->len < pdu_length) {
43488 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
43489 __func__, pdu_length, sarb->len, vcc);
43490- atomic_inc(&vcc->stats->rx_err);
43491+ atomic_inc_unchecked(&vcc->stats->rx_err);
43492 goto out;
43493 }
43494
43495 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
43496 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
43497 __func__, vcc);
43498- atomic_inc(&vcc->stats->rx_err);
43499+ atomic_inc_unchecked(&vcc->stats->rx_err);
43500 goto out;
43501 }
43502
43503@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43504 if (printk_ratelimit())
43505 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
43506 __func__, length);
43507- atomic_inc(&vcc->stats->rx_drop);
43508+ atomic_inc_unchecked(&vcc->stats->rx_drop);
43509 goto out;
43510 }
43511
43512@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
43513
43514 vcc->push(vcc, skb);
43515
43516- atomic_inc(&vcc->stats->rx);
43517+ atomic_inc_unchecked(&vcc->stats->rx);
43518 out:
43519 skb_trim(sarb, 0);
43520 }
43521@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
43522 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
43523
43524 usbatm_pop(vcc, skb);
43525- atomic_inc(&vcc->stats->tx);
43526+ atomic_inc_unchecked(&vcc->stats->tx);
43527
43528 skb = skb_dequeue(&instance->sndqueue);
43529 }
43530@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
43531 if (!left--)
43532 return sprintf(page,
43533 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
43534- atomic_read(&atm_dev->stats.aal5.tx),
43535- atomic_read(&atm_dev->stats.aal5.tx_err),
43536- atomic_read(&atm_dev->stats.aal5.rx),
43537- atomic_read(&atm_dev->stats.aal5.rx_err),
43538- atomic_read(&atm_dev->stats.aal5.rx_drop));
43539+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
43540+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
43541+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
43542+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
43543+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
43544
43545 if (!left--) {
43546 if (instance->disconnected)
43547diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
43548index cbacea9..246cccd 100644
43549--- a/drivers/usb/core/devices.c
43550+++ b/drivers/usb/core/devices.c
43551@@ -126,7 +126,7 @@ static const char format_endpt[] =
43552 * time it gets called.
43553 */
43554 static struct device_connect_event {
43555- atomic_t count;
43556+ atomic_unchecked_t count;
43557 wait_queue_head_t wait;
43558 } device_event = {
43559 .count = ATOMIC_INIT(1),
43560@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
43561
43562 void usbfs_conn_disc_event(void)
43563 {
43564- atomic_add(2, &device_event.count);
43565+ atomic_add_unchecked(2, &device_event.count);
43566 wake_up(&device_event.wait);
43567 }
43568
43569@@ -645,7 +645,7 @@ static unsigned int usb_device_poll(struct file *file,
43570
43571 poll_wait(file, &device_event.wait, wait);
43572
43573- event_count = atomic_read(&device_event.count);
43574+ event_count = atomic_read_unchecked(&device_event.count);
43575 if (file->f_version != event_count) {
43576 file->f_version = event_count;
43577 return POLLIN | POLLRDNORM;
43578diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
43579index 8e64adf..9a33a3c 100644
43580--- a/drivers/usb/core/hcd.c
43581+++ b/drivers/usb/core/hcd.c
43582@@ -1522,7 +1522,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
43583 */
43584 usb_get_urb(urb);
43585 atomic_inc(&urb->use_count);
43586- atomic_inc(&urb->dev->urbnum);
43587+ atomic_inc_unchecked(&urb->dev->urbnum);
43588 usbmon_urb_submit(&hcd->self, urb);
43589
43590 /* NOTE requirements on root-hub callers (usbfs and the hub
43591@@ -1549,7 +1549,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
43592 urb->hcpriv = NULL;
43593 INIT_LIST_HEAD(&urb->urb_list);
43594 atomic_dec(&urb->use_count);
43595- atomic_dec(&urb->dev->urbnum);
43596+ atomic_dec_unchecked(&urb->dev->urbnum);
43597 if (atomic_read(&urb->reject))
43598 wake_up(&usb_kill_urb_queue);
43599 usb_put_urb(urb);
43600diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
43601index 818e4a0..0fc9589 100644
43602--- a/drivers/usb/core/sysfs.c
43603+++ b/drivers/usb/core/sysfs.c
43604@@ -226,7 +226,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
43605 struct usb_device *udev;
43606
43607 udev = to_usb_device(dev);
43608- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
43609+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
43610 }
43611 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
43612
43613diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
43614index f81b925..78d22ec 100644
43615--- a/drivers/usb/core/usb.c
43616+++ b/drivers/usb/core/usb.c
43617@@ -388,7 +388,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
43618 set_dev_node(&dev->dev, dev_to_node(bus->controller));
43619 dev->state = USB_STATE_ATTACHED;
43620 dev->lpm_disable_count = 1;
43621- atomic_set(&dev->urbnum, 0);
43622+ atomic_set_unchecked(&dev->urbnum, 0);
43623
43624 INIT_LIST_HEAD(&dev->ep0.urb_list);
43625 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
43626diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
43627index 5e29dde..eca992f 100644
43628--- a/drivers/usb/early/ehci-dbgp.c
43629+++ b/drivers/usb/early/ehci-dbgp.c
43630@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
43631
43632 #ifdef CONFIG_KGDB
43633 static struct kgdb_io kgdbdbgp_io_ops;
43634-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
43635+static struct kgdb_io kgdbdbgp_io_ops_console;
43636+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
43637 #else
43638 #define dbgp_kgdb_mode (0)
43639 #endif
43640@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
43641 .write_char = kgdbdbgp_write_char,
43642 };
43643
43644+static struct kgdb_io kgdbdbgp_io_ops_console = {
43645+ .name = "kgdbdbgp",
43646+ .read_char = kgdbdbgp_read_char,
43647+ .write_char = kgdbdbgp_write_char,
43648+ .is_console = 1
43649+};
43650+
43651 static int kgdbdbgp_wait_time;
43652
43653 static int __init kgdbdbgp_parse_config(char *str)
43654@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
43655 ptr++;
43656 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
43657 }
43658- kgdb_register_io_module(&kgdbdbgp_io_ops);
43659- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
43660+ if (early_dbgp_console.index != -1)
43661+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
43662+ else
43663+ kgdb_register_io_module(&kgdbdbgp_io_ops);
43664
43665 return 0;
43666 }
43667diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
43668index 598dcc1..032dd4f 100644
43669--- a/drivers/usb/gadget/u_serial.c
43670+++ b/drivers/usb/gadget/u_serial.c
43671@@ -735,9 +735,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
43672 spin_lock_irq(&port->port_lock);
43673
43674 /* already open? Great. */
43675- if (port->port.count) {
43676+ if (atomic_read(&port->port.count)) {
43677 status = 0;
43678- port->port.count++;
43679+ atomic_inc(&port->port.count);
43680
43681 /* currently opening/closing? wait ... */
43682 } else if (port->openclose) {
43683@@ -796,7 +796,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
43684 tty->driver_data = port;
43685 port->port.tty = tty;
43686
43687- port->port.count = 1;
43688+ atomic_set(&port->port.count, 1);
43689 port->openclose = false;
43690
43691 /* if connected, start the I/O stream */
43692@@ -838,11 +838,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
43693
43694 spin_lock_irq(&port->port_lock);
43695
43696- if (port->port.count != 1) {
43697- if (port->port.count == 0)
43698+ if (atomic_read(&port->port.count) != 1) {
43699+ if (atomic_read(&port->port.count) == 0)
43700 WARN_ON(1);
43701 else
43702- --port->port.count;
43703+ atomic_dec(&port->port.count);
43704 goto exit;
43705 }
43706
43707@@ -852,7 +852,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
43708 * and sleep if necessary
43709 */
43710 port->openclose = true;
43711- port->port.count = 0;
43712+ atomic_set(&port->port.count, 0);
43713
43714 gser = port->port_usb;
43715 if (gser && gser->disconnect)
43716@@ -1159,7 +1159,7 @@ static int gs_closed(struct gs_port *port)
43717 int cond;
43718
43719 spin_lock_irq(&port->port_lock);
43720- cond = (port->port.count == 0) && !port->openclose;
43721+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
43722 spin_unlock_irq(&port->port_lock);
43723 return cond;
43724 }
43725@@ -1273,7 +1273,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
43726 /* if it's already open, start I/O ... and notify the serial
43727 * protocol about open/close status (connect/disconnect).
43728 */
43729- if (port->port.count) {
43730+ if (atomic_read(&port->port.count)) {
43731 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
43732 gs_start_io(port);
43733 if (gser->connect)
43734@@ -1320,7 +1320,7 @@ void gserial_disconnect(struct gserial *gser)
43735
43736 port->port_usb = NULL;
43737 gser->ioport = NULL;
43738- if (port->port.count > 0 || port->openclose) {
43739+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
43740 wake_up_interruptible(&port->drain_wait);
43741 if (port->port.tty)
43742 tty_hangup(port->port.tty);
43743@@ -1336,7 +1336,7 @@ void gserial_disconnect(struct gserial *gser)
43744
43745 /* finally, free any unused/unusable I/O buffers */
43746 spin_lock_irqsave(&port->port_lock, flags);
43747- if (port->port.count == 0 && !port->openclose)
43748+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
43749 gs_buf_free(&port->port_write_buf);
43750 gs_free_requests(gser->out, &port->read_pool, NULL);
43751 gs_free_requests(gser->out, &port->read_queue, NULL);
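Unlike the statistics counters, port.count in these u_serial hunks is an open count, i.e. a true reference count, so it is converted to the checked atomic_t: overflowing it would be a real refcount bug that PAX_REFCOUNT should trap rather than let wrap. The hunks depend on a companion change to the core tty header, roughly as follows (a sketch, assuming the patch's conversion of struct tty_port):

	/* include/linux/tty.h (abridged): the open count becomes atomic, so
	 * overflow traps and lock-free reads are well-defined. */
	struct tty_port {
		/* ... */
		atomic_t		count;	/* was: int count; */
		/* ... */
	};

Accordingly, port->port.count++ becomes atomic_inc(&port->port.count) and plain tests become atomic_read(&port->port.count), as in gs_open() and gs_close() above.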
43752diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
43753index 5f3bcd3..bfca43f 100644
43754--- a/drivers/usb/serial/console.c
43755+++ b/drivers/usb/serial/console.c
43756@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
43757
43758 info->port = port;
43759
43760- ++port->port.count;
43761+ atomic_inc(&port->port.count);
43762 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
43763 if (serial->type->set_termios) {
43764 /*
43765@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
43766 }
43767 /* Now that any required fake tty operations are completed restore
43768 * the tty port count */
43769- --port->port.count;
43770+ atomic_dec(&port->port.count);
43771 /* The console is special in terms of closing the device so
43772 * indicate this port is now acting as a system console. */
43773 port->port.console = 1;
43774@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
43775 free_tty:
43776 kfree(tty);
43777 reset_open_count:
43778- port->port.count = 0;
43779+ atomic_set(&port->port.count, 0);
43780 usb_autopm_put_interface(serial->interface);
43781 error_get_interface:
43782 usb_serial_put(serial);
43783diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
43784index 75f70f0..d467e1a 100644
43785--- a/drivers/usb/storage/usb.h
43786+++ b/drivers/usb/storage/usb.h
43787@@ -63,7 +63,7 @@ struct us_unusual_dev {
43788 __u8 useProtocol;
43789 __u8 useTransport;
43790 int (*initFunction)(struct us_data *);
43791-};
43792+} __do_const;
43793
43794
43795 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
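__do_const is a hook for the constify gcc plugin shipped with this patch: tagging struct us_unusual_dev moves every instance of the type into read-only data, which is safe because its members (matching bytes and an init callback) are fixed at build time. A sketch of how the macro is defined elsewhere in the patch when the plugin is enabled:

	/* compiler header (sketch): the attribute is consumed by the
	 * constify plugin, which forces instances of the type into .rodata. */
	#ifdef CONSTIFY_PLUGIN
	#define __do_const	__attribute__((do_const))
	#else
	#define __do_const
	#endif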
43796diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
43797index d6bea3e..60b250e 100644
43798--- a/drivers/usb/wusbcore/wa-hc.h
43799+++ b/drivers/usb/wusbcore/wa-hc.h
43800@@ -192,7 +192,7 @@ struct wahc {
43801 struct list_head xfer_delayed_list;
43802 spinlock_t xfer_list_lock;
43803 struct work_struct xfer_work;
43804- atomic_t xfer_id_count;
43805+ atomic_unchecked_t xfer_id_count;
43806 };
43807
43808
43809@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
43810 INIT_LIST_HEAD(&wa->xfer_delayed_list);
43811 spin_lock_init(&wa->xfer_list_lock);
43812 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
43813- atomic_set(&wa->xfer_id_count, 1);
43814+ atomic_set_unchecked(&wa->xfer_id_count, 1);
43815 }
43816
43817 /**
43818diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
43819index 57c01ab..8a05959 100644
43820--- a/drivers/usb/wusbcore/wa-xfer.c
43821+++ b/drivers/usb/wusbcore/wa-xfer.c
43822@@ -296,7 +296,7 @@ out:
43823 */
43824 static void wa_xfer_id_init(struct wa_xfer *xfer)
43825 {
43826- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
43827+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
43828 }
43829
43830 /*
43831diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
43832index 8c55011..eed4ae1a 100644
43833--- a/drivers/video/aty/aty128fb.c
43834+++ b/drivers/video/aty/aty128fb.c
43835@@ -149,7 +149,7 @@ enum {
43836 };
43837
43838 /* Must match above enum */
43839-static char * const r128_family[] = {
43840+static const char * const r128_family[] = {
43841 "AGP",
43842 "PCI",
43843 "PRO AGP",
43844diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
43845index 4f27fdc..d3537e6 100644
43846--- a/drivers/video/aty/atyfb_base.c
43847+++ b/drivers/video/aty/atyfb_base.c
43848@@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
43849 par->accel_flags = var->accel_flags; /* hack */
43850
43851 if (var->accel_flags) {
43852- info->fbops->fb_sync = atyfb_sync;
43853+ pax_open_kernel();
43854+ *(void **)&info->fbops->fb_sync = atyfb_sync;
43855+ pax_close_kernel();
43856 info->flags &= ~FBINFO_HWACCEL_DISABLED;
43857 } else {
43858- info->fbops->fb_sync = NULL;
43859+ pax_open_kernel();
43860+ *(void **)&info->fbops->fb_sync = NULL;
43861+ pax_close_kernel();
43862 info->flags |= FBINFO_HWACCEL_DISABLED;
43863 }
43864
43865diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
43866index 95ec042..ae33e7a 100644
43867--- a/drivers/video/aty/mach64_cursor.c
43868+++ b/drivers/video/aty/mach64_cursor.c
43869@@ -208,7 +208,9 @@ int aty_init_cursor(struct fb_info *info)
43870 info->sprite.buf_align = 16; /* and 64 lines tall. */
43871 info->sprite.flags = FB_PIXMAP_IO;
43872
43873- info->fbops->fb_cursor = atyfb_cursor;
43874+ pax_open_kernel();
43875+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
43876+ pax_close_kernel();
43877
43878 return 0;
43879 }
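The atyfb and mach64 hunks write through info->fbops, which constification turns into a pointer to read-only memory: the *(void **)& cast silences the compiler's const check, while pax_open_kernel()/pax_close_kernel() open a brief window in the hardware write protection. On x86 the helpers look roughly like this elsewhere in the patch (a sketch, assuming PAX_KERNEXEC):

	static inline unsigned long native_pax_open_kernel(void)
	{
		unsigned long cr0;

		preempt_disable();
		barrier();
		cr0 = read_cr0() ^ X86_CR0_WP;	/* clear CR0.WP: writes to RO pages allowed */
		BUG_ON(cr0 & X86_CR0_WP);
		write_cr0(cr0);
		return cr0 ^ X86_CR0_WP;
	}

	static inline unsigned long native_pax_close_kernel(void)
	{
		unsigned long cr0;

		cr0 = read_cr0() ^ X86_CR0_WP;	/* set CR0.WP again */
		BUG_ON(!(cr0 & X86_CR0_WP));
		write_cr0(cr0);
		barrier();
		preempt_enable_no_resched();
		return cr0 ^ X86_CR0_WP;
	}

Keeping the window to a single pointer store, as these hunks do, minimizes the time any read-only kernel data is writable.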
43880diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
43881index 6c5ed6b..b727c88 100644
43882--- a/drivers/video/backlight/kb3886_bl.c
43883+++ b/drivers/video/backlight/kb3886_bl.c
43884@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
43885 static unsigned long kb3886bl_flags;
43886 #define KB3886BL_SUSPENDED 0x01
43887
43888-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
43889+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
43890 {
43891 .ident = "Sahara Touch-iT",
43892 .matches = {
43893diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
43894index 88cad6b..dd746c7 100644
43895--- a/drivers/video/fb_defio.c
43896+++ b/drivers/video/fb_defio.c
43897@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
43898
43899 BUG_ON(!fbdefio);
43900 mutex_init(&fbdefio->lock);
43901- info->fbops->fb_mmap = fb_deferred_io_mmap;
43902+ pax_open_kernel();
43903+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
43904+ pax_close_kernel();
43905 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
43906 INIT_LIST_HEAD(&fbdefio->pagelist);
43907 if (fbdefio->delay == 0) /* set a default of 1 s */
43908@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
43909 page->mapping = NULL;
43910 }
43911
43912- info->fbops->fb_mmap = NULL;
43913+ *(void **)&info->fbops->fb_mmap = NULL;
43914 mutex_destroy(&fbdefio->lock);
43915 }
43916 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
43917diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
43918index 5c3960d..15cf8fc 100644
43919--- a/drivers/video/fbcmap.c
43920+++ b/drivers/video/fbcmap.c
43921@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
43922 rc = -ENODEV;
43923 goto out;
43924 }
43925- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
43926- !info->fbops->fb_setcmap)) {
43927+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
43928 rc = -EINVAL;
43929 goto out1;
43930 }
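The dropped half of the fb_set_user_cmap condition was dead code: cmap->start is unsigned, so the < 0 comparison can never be true and only provoked compiler warnings. Abridged from the UAPI definition in include/linux/fb.h:

	struct fb_cmap_user {
		__u32 start;	/* first colormap entry; unsigned, so start < 0 is always false */
		__u32 len;
		__u16 __user *red, *green, *blue, *transp;
	};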
43931diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
43932index dc61c12..e29796e 100644
43933--- a/drivers/video/fbmem.c
43934+++ b/drivers/video/fbmem.c
43935@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43936 image->dx += image->width + 8;
43937 }
43938 } else if (rotate == FB_ROTATE_UD) {
43939- for (x = 0; x < num && image->dx >= 0; x++) {
43940+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
43941 info->fbops->fb_imageblit(info, image);
43942 image->dx -= image->width + 8;
43943 }
43944@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43945 image->dy += image->height + 8;
43946 }
43947 } else if (rotate == FB_ROTATE_CCW) {
43948- for (x = 0; x < num && image->dy >= 0; x++) {
43949+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
43950 info->fbops->fb_imageblit(info, image);
43951 image->dy -= image->height + 8;
43952 }
43953@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43954 return -EFAULT;
43955 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
43956 return -EINVAL;
43957- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
43958+ if (con2fb.framebuffer >= FB_MAX)
43959 return -EINVAL;
43960 if (!registered_fb[con2fb.framebuffer])
43961 request_module("fb%d", con2fb.framebuffer);
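The fb_do_show_logo casts address the opposite signedness hazard: image->dx and image->dy are __u32, so once the in-loop subtraction wraps past zero the >= 0 test stays true forever; reinterpreting the value as __s32 makes the wrapped result negative and terminates the loop as intended. With hypothetical values:

	__u32 dx = 4;
	dx -= 8 + 8;		/* wraps: dx == 0xFFFFFFF4 */
	/* dx >= 0          -> always true for an unsigned type */
	/* (__s32)dx >= 0   -> false, since (__s32)dx == -12    */

The con2fb.framebuffer check in do_fb_ioctl is the mirror case: framebuffer is also a __u32, so its < 0 half is likewise dropped as dead code.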
43962diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
43963index 7672d2e..b56437f 100644
43964--- a/drivers/video/i810/i810_accel.c
43965+++ b/drivers/video/i810/i810_accel.c
43966@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
43967 }
43968 }
43969 printk("ringbuffer lockup!!!\n");
43970+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
43971 i810_report_error(mmio);
43972 par->dev_flags |= LOCKUP;
43973 info->pixmap.scan_align = 1;
43974diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
43975index 3c14e43..eafa544 100644
43976--- a/drivers/video/logo/logo_linux_clut224.ppm
43977+++ b/drivers/video/logo/logo_linux_clut224.ppm
43978@@ -1,1604 +1,1123 @@
43979 P3
43980-# Standard 224-color Linux logo
43981 80 80
43982 255
- [... 80x80 raster rows elided: this hunk strips the stock 224-color logo's pixel data and substitutes replacement rows later in the hunk ...]
45171-253 253 253 253 253 253 253 253 253 253 253 253
45172-253 253 253 253 253 253 253 253 253 253 253 253
45173-253 253 253 253 253 253 253 253 253 253 253 253
45174-253 253 253 253 253 253 253 253 253 253 253 253
45175-253 253 253 231 231 231 198 198 198 214 170 54
45176-236 178 12 236 178 12 210 150 10 137 92 6
45177- 18 14 6 2 2 6 2 2 6 2 2 6
45178- 6 6 6 70 47 6 200 144 11 236 178 12
45179-239 182 13 239 182 13 124 112 88 58 58 58
45180- 22 22 22 6 6 6 0 0 0 0 0 0
45181- 0 0 0 0 0 0 0 0 0 0 0 0
45182- 0 0 0 0 0 0 0 0 0 0 0 0
45183- 0 0 0 0 0 0 0 0 0 0 0 0
45184- 0 0 0 0 0 0 0 0 0 10 10 10
45185- 30 30 30 70 70 70 180 133 36 226 170 11
45186-239 182 13 242 186 14 242 186 14 246 186 14
45187-246 190 14 246 190 14 246 190 14 246 190 14
45188-246 190 14 246 190 14 246 190 14 246 190 14
45189-246 190 14 232 195 16 98 70 6 2 2 6
45190- 2 2 6 2 2 6 66 66 66 221 221 221
45191-253 253 253 253 253 253 253 253 253 253 253 253
45192-253 253 253 253 253 253 253 253 253 253 253 253
45193-253 253 253 253 253 253 253 253 253 253 253 253
45194-253 253 253 253 253 253 253 253 253 253 253 253
45195-253 253 253 206 206 206 198 198 198 214 166 58
45196-230 174 11 230 174 11 216 158 10 192 133 9
45197-163 110 8 116 81 8 102 78 10 116 81 8
45198-167 114 7 197 138 11 226 170 11 239 182 13
45199-242 186 14 242 186 14 162 146 94 78 78 78
45200- 34 34 34 14 14 14 6 6 6 0 0 0
45201- 0 0 0 0 0 0 0 0 0 0 0 0
45202- 0 0 0 0 0 0 0 0 0 0 0 0
45203- 0 0 0 0 0 0 0 0 0 0 0 0
45204- 0 0 0 0 0 0 0 0 0 6 6 6
45205- 30 30 30 78 78 78 190 142 34 226 170 11
45206-239 182 13 246 190 14 246 190 14 246 190 14
45207-246 190 14 246 190 14 246 190 14 246 190 14
45208-246 190 14 246 190 14 246 190 14 246 190 14
45209-246 190 14 241 196 14 203 166 17 22 18 6
45210- 2 2 6 2 2 6 2 2 6 38 38 38
45211-218 218 218 253 253 253 253 253 253 253 253 253
45212-253 253 253 253 253 253 253 253 253 253 253 253
45213-253 253 253 253 253 253 253 253 253 253 253 253
45214-253 253 253 253 253 253 253 253 253 253 253 253
45215-250 250 250 206 206 206 198 198 198 202 162 69
45216-226 170 11 236 178 12 224 166 10 210 150 10
45217-200 144 11 197 138 11 192 133 9 197 138 11
45218-210 150 10 226 170 11 242 186 14 246 190 14
45219-246 190 14 246 186 14 225 175 15 124 112 88
45220- 62 62 62 30 30 30 14 14 14 6 6 6
45221- 0 0 0 0 0 0 0 0 0 0 0 0
45222- 0 0 0 0 0 0 0 0 0 0 0 0
45223- 0 0 0 0 0 0 0 0 0 0 0 0
45224- 0 0 0 0 0 0 0 0 0 10 10 10
45225- 30 30 30 78 78 78 174 135 50 224 166 10
45226-239 182 13 246 190 14 246 190 14 246 190 14
45227-246 190 14 246 190 14 246 190 14 246 190 14
45228-246 190 14 246 190 14 246 190 14 246 190 14
45229-246 190 14 246 190 14 241 196 14 139 102 15
45230- 2 2 6 2 2 6 2 2 6 2 2 6
45231- 78 78 78 250 250 250 253 253 253 253 253 253
45232-253 253 253 253 253 253 253 253 253 253 253 253
45233-253 253 253 253 253 253 253 253 253 253 253 253
45234-253 253 253 253 253 253 253 253 253 253 253 253
45235-250 250 250 214 214 214 198 198 198 190 150 46
45236-219 162 10 236 178 12 234 174 13 224 166 10
45237-216 158 10 213 154 11 213 154 11 216 158 10
45238-226 170 11 239 182 13 246 190 14 246 190 14
45239-246 190 14 246 190 14 242 186 14 206 162 42
45240-101 101 101 58 58 58 30 30 30 14 14 14
45241- 6 6 6 0 0 0 0 0 0 0 0 0
45242- 0 0 0 0 0 0 0 0 0 0 0 0
45243- 0 0 0 0 0 0 0 0 0 0 0 0
45244- 0 0 0 0 0 0 0 0 0 10 10 10
45245- 30 30 30 74 74 74 174 135 50 216 158 10
45246-236 178 12 246 190 14 246 190 14 246 190 14
45247-246 190 14 246 190 14 246 190 14 246 190 14
45248-246 190 14 246 190 14 246 190 14 246 190 14
45249-246 190 14 246 190 14 241 196 14 226 184 13
45250- 61 42 6 2 2 6 2 2 6 2 2 6
45251- 22 22 22 238 238 238 253 253 253 253 253 253
45252-253 253 253 253 253 253 253 253 253 253 253 253
45253-253 253 253 253 253 253 253 253 253 253 253 253
45254-253 253 253 253 253 253 253 253 253 253 253 253
45255-253 253 253 226 226 226 187 187 187 180 133 36
45256-216 158 10 236 178 12 239 182 13 236 178 12
45257-230 174 11 226 170 11 226 170 11 230 174 11
45258-236 178 12 242 186 14 246 190 14 246 190 14
45259-246 190 14 246 190 14 246 186 14 239 182 13
45260-206 162 42 106 106 106 66 66 66 34 34 34
45261- 14 14 14 6 6 6 0 0 0 0 0 0
45262- 0 0 0 0 0 0 0 0 0 0 0 0
45263- 0 0 0 0 0 0 0 0 0 0 0 0
45264- 0 0 0 0 0 0 0 0 0 6 6 6
45265- 26 26 26 70 70 70 163 133 67 213 154 11
45266-236 178 12 246 190 14 246 190 14 246 190 14
45267-246 190 14 246 190 14 246 190 14 246 190 14
45268-246 190 14 246 190 14 246 190 14 246 190 14
45269-246 190 14 246 190 14 246 190 14 241 196 14
45270-190 146 13 18 14 6 2 2 6 2 2 6
45271- 46 46 46 246 246 246 253 253 253 253 253 253
45272-253 253 253 253 253 253 253 253 253 253 253 253
45273-253 253 253 253 253 253 253 253 253 253 253 253
45274-253 253 253 253 253 253 253 253 253 253 253 253
45275-253 253 253 221 221 221 86 86 86 156 107 11
45276-216 158 10 236 178 12 242 186 14 246 186 14
45277-242 186 14 239 182 13 239 182 13 242 186 14
45278-242 186 14 246 186 14 246 190 14 246 190 14
45279-246 190 14 246 190 14 246 190 14 246 190 14
45280-242 186 14 225 175 15 142 122 72 66 66 66
45281- 30 30 30 10 10 10 0 0 0 0 0 0
45282- 0 0 0 0 0 0 0 0 0 0 0 0
45283- 0 0 0 0 0 0 0 0 0 0 0 0
45284- 0 0 0 0 0 0 0 0 0 6 6 6
45285- 26 26 26 70 70 70 163 133 67 210 150 10
45286-236 178 12 246 190 14 246 190 14 246 190 14
45287-246 190 14 246 190 14 246 190 14 246 190 14
45288-246 190 14 246 190 14 246 190 14 246 190 14
45289-246 190 14 246 190 14 246 190 14 246 190 14
45290-232 195 16 121 92 8 34 34 34 106 106 106
45291-221 221 221 253 253 253 253 253 253 253 253 253
45292-253 253 253 253 253 253 253 253 253 253 253 253
45293-253 253 253 253 253 253 253 253 253 253 253 253
45294-253 253 253 253 253 253 253 253 253 253 253 253
45295-242 242 242 82 82 82 18 14 6 163 110 8
45296-216 158 10 236 178 12 242 186 14 246 190 14
45297-246 190 14 246 190 14 246 190 14 246 190 14
45298-246 190 14 246 190 14 246 190 14 246 190 14
45299-246 190 14 246 190 14 246 190 14 246 190 14
45300-246 190 14 246 190 14 242 186 14 163 133 67
45301- 46 46 46 18 18 18 6 6 6 0 0 0
45302- 0 0 0 0 0 0 0 0 0 0 0 0
45303- 0 0 0 0 0 0 0 0 0 0 0 0
45304- 0 0 0 0 0 0 0 0 0 10 10 10
45305- 30 30 30 78 78 78 163 133 67 210 150 10
45306-236 178 12 246 186 14 246 190 14 246 190 14
45307-246 190 14 246 190 14 246 190 14 246 190 14
45308-246 190 14 246 190 14 246 190 14 246 190 14
45309-246 190 14 246 190 14 246 190 14 246 190 14
45310-241 196 14 215 174 15 190 178 144 253 253 253
45311-253 253 253 253 253 253 253 253 253 253 253 253
45312-253 253 253 253 253 253 253 253 253 253 253 253
45313-253 253 253 253 253 253 253 253 253 253 253 253
45314-253 253 253 253 253 253 253 253 253 218 218 218
45315- 58 58 58 2 2 6 22 18 6 167 114 7
45316-216 158 10 236 178 12 246 186 14 246 190 14
45317-246 190 14 246 190 14 246 190 14 246 190 14
45318-246 190 14 246 190 14 246 190 14 246 190 14
45319-246 190 14 246 190 14 246 190 14 246 190 14
45320-246 190 14 246 186 14 242 186 14 190 150 46
45321- 54 54 54 22 22 22 6 6 6 0 0 0
45322- 0 0 0 0 0 0 0 0 0 0 0 0
45323- 0 0 0 0 0 0 0 0 0 0 0 0
45324- 0 0 0 0 0 0 0 0 0 14 14 14
45325- 38 38 38 86 86 86 180 133 36 213 154 11
45326-236 178 12 246 186 14 246 190 14 246 190 14
45327-246 190 14 246 190 14 246 190 14 246 190 14
45328-246 190 14 246 190 14 246 190 14 246 190 14
45329-246 190 14 246 190 14 246 190 14 246 190 14
45330-246 190 14 232 195 16 190 146 13 214 214 214
45331-253 253 253 253 253 253 253 253 253 253 253 253
45332-253 253 253 253 253 253 253 253 253 253 253 253
45333-253 253 253 253 253 253 253 253 253 253 253 253
45334-253 253 253 250 250 250 170 170 170 26 26 26
45335- 2 2 6 2 2 6 37 26 9 163 110 8
45336-219 162 10 239 182 13 246 186 14 246 190 14
45337-246 190 14 246 190 14 246 190 14 246 190 14
45338-246 190 14 246 190 14 246 190 14 246 190 14
45339-246 190 14 246 190 14 246 190 14 246 190 14
45340-246 186 14 236 178 12 224 166 10 142 122 72
45341- 46 46 46 18 18 18 6 6 6 0 0 0
45342- 0 0 0 0 0 0 0 0 0 0 0 0
45343- 0 0 0 0 0 0 0 0 0 0 0 0
45344- 0 0 0 0 0 0 6 6 6 18 18 18
45345- 50 50 50 109 106 95 192 133 9 224 166 10
45346-242 186 14 246 190 14 246 190 14 246 190 14
45347-246 190 14 246 190 14 246 190 14 246 190 14
45348-246 190 14 246 190 14 246 190 14 246 190 14
45349-246 190 14 246 190 14 246 190 14 246 190 14
45350-242 186 14 226 184 13 210 162 10 142 110 46
45351-226 226 226 253 253 253 253 253 253 253 253 253
45352-253 253 253 253 253 253 253 253 253 253 253 253
45353-253 253 253 253 253 253 253 253 253 253 253 253
45354-198 198 198 66 66 66 2 2 6 2 2 6
45355- 2 2 6 2 2 6 50 34 6 156 107 11
45356-219 162 10 239 182 13 246 186 14 246 190 14
45357-246 190 14 246 190 14 246 190 14 246 190 14
45358-246 190 14 246 190 14 246 190 14 246 190 14
45359-246 190 14 246 190 14 246 190 14 242 186 14
45360-234 174 13 213 154 11 154 122 46 66 66 66
45361- 30 30 30 10 10 10 0 0 0 0 0 0
45362- 0 0 0 0 0 0 0 0 0 0 0 0
45363- 0 0 0 0 0 0 0 0 0 0 0 0
45364- 0 0 0 0 0 0 6 6 6 22 22 22
45365- 58 58 58 154 121 60 206 145 10 234 174 13
45366-242 186 14 246 186 14 246 190 14 246 190 14
45367-246 190 14 246 190 14 246 190 14 246 190 14
45368-246 190 14 246 190 14 246 190 14 246 190 14
45369-246 190 14 246 190 14 246 190 14 246 190 14
45370-246 186 14 236 178 12 210 162 10 163 110 8
45371- 61 42 6 138 138 138 218 218 218 250 250 250
45372-253 253 253 253 253 253 253 253 253 250 250 250
45373-242 242 242 210 210 210 144 144 144 66 66 66
45374- 6 6 6 2 2 6 2 2 6 2 2 6
45375- 2 2 6 2 2 6 61 42 6 163 110 8
45376-216 158 10 236 178 12 246 190 14 246 190 14
45377-246 190 14 246 190 14 246 190 14 246 190 14
45378-246 190 14 246 190 14 246 190 14 246 190 14
45379-246 190 14 239 182 13 230 174 11 216 158 10
45380-190 142 34 124 112 88 70 70 70 38 38 38
45381- 18 18 18 6 6 6 0 0 0 0 0 0
45382- 0 0 0 0 0 0 0 0 0 0 0 0
45383- 0 0 0 0 0 0 0 0 0 0 0 0
45384- 0 0 0 0 0 0 6 6 6 22 22 22
45385- 62 62 62 168 124 44 206 145 10 224 166 10
45386-236 178 12 239 182 13 242 186 14 242 186 14
45387-246 186 14 246 190 14 246 190 14 246 190 14
45388-246 190 14 246 190 14 246 190 14 246 190 14
45389-246 190 14 246 190 14 246 190 14 246 190 14
45390-246 190 14 236 178 12 216 158 10 175 118 6
45391- 80 54 7 2 2 6 6 6 6 30 30 30
45392- 54 54 54 62 62 62 50 50 50 38 38 38
45393- 14 14 14 2 2 6 2 2 6 2 2 6
45394- 2 2 6 2 2 6 2 2 6 2 2 6
45395- 2 2 6 6 6 6 80 54 7 167 114 7
45396-213 154 11 236 178 12 246 190 14 246 190 14
45397-246 190 14 246 190 14 246 190 14 246 190 14
45398-246 190 14 242 186 14 239 182 13 239 182 13
45399-230 174 11 210 150 10 174 135 50 124 112 88
45400- 82 82 82 54 54 54 34 34 34 18 18 18
45401- 6 6 6 0 0 0 0 0 0 0 0 0
45402- 0 0 0 0 0 0 0 0 0 0 0 0
45403- 0 0 0 0 0 0 0 0 0 0 0 0
45404- 0 0 0 0 0 0 6 6 6 18 18 18
45405- 50 50 50 158 118 36 192 133 9 200 144 11
45406-216 158 10 219 162 10 224 166 10 226 170 11
45407-230 174 11 236 178 12 239 182 13 239 182 13
45408-242 186 14 246 186 14 246 190 14 246 190 14
45409-246 190 14 246 190 14 246 190 14 246 190 14
45410-246 186 14 230 174 11 210 150 10 163 110 8
45411-104 69 6 10 10 10 2 2 6 2 2 6
45412- 2 2 6 2 2 6 2 2 6 2 2 6
45413- 2 2 6 2 2 6 2 2 6 2 2 6
45414- 2 2 6 2 2 6 2 2 6 2 2 6
45415- 2 2 6 6 6 6 91 60 6 167 114 7
45416-206 145 10 230 174 11 242 186 14 246 190 14
45417-246 190 14 246 190 14 246 186 14 242 186 14
45418-239 182 13 230 174 11 224 166 10 213 154 11
45419-180 133 36 124 112 88 86 86 86 58 58 58
45420- 38 38 38 22 22 22 10 10 10 6 6 6
45421- 0 0 0 0 0 0 0 0 0 0 0 0
45422- 0 0 0 0 0 0 0 0 0 0 0 0
45423- 0 0 0 0 0 0 0 0 0 0 0 0
45424- 0 0 0 0 0 0 0 0 0 14 14 14
45425- 34 34 34 70 70 70 138 110 50 158 118 36
45426-167 114 7 180 123 7 192 133 9 197 138 11
45427-200 144 11 206 145 10 213 154 11 219 162 10
45428-224 166 10 230 174 11 239 182 13 242 186 14
45429-246 186 14 246 186 14 246 186 14 246 186 14
45430-239 182 13 216 158 10 185 133 11 152 99 6
45431-104 69 6 18 14 6 2 2 6 2 2 6
45432- 2 2 6 2 2 6 2 2 6 2 2 6
45433- 2 2 6 2 2 6 2 2 6 2 2 6
45434- 2 2 6 2 2 6 2 2 6 2 2 6
45435- 2 2 6 6 6 6 80 54 7 152 99 6
45436-192 133 9 219 162 10 236 178 12 239 182 13
45437-246 186 14 242 186 14 239 182 13 236 178 12
45438-224 166 10 206 145 10 192 133 9 154 121 60
45439- 94 94 94 62 62 62 42 42 42 22 22 22
45440- 14 14 14 6 6 6 0 0 0 0 0 0
45441- 0 0 0 0 0 0 0 0 0 0 0 0
45442- 0 0 0 0 0 0 0 0 0 0 0 0
45443- 0 0 0 0 0 0 0 0 0 0 0 0
45444- 0 0 0 0 0 0 0 0 0 6 6 6
45445- 18 18 18 34 34 34 58 58 58 78 78 78
45446-101 98 89 124 112 88 142 110 46 156 107 11
45447-163 110 8 167 114 7 175 118 6 180 123 7
45448-185 133 11 197 138 11 210 150 10 219 162 10
45449-226 170 11 236 178 12 236 178 12 234 174 13
45450-219 162 10 197 138 11 163 110 8 130 83 6
45451- 91 60 6 10 10 10 2 2 6 2 2 6
45452- 18 18 18 38 38 38 38 38 38 38 38 38
45453- 38 38 38 38 38 38 38 38 38 38 38 38
45454- 38 38 38 38 38 38 26 26 26 2 2 6
45455- 2 2 6 6 6 6 70 47 6 137 92 6
45456-175 118 6 200 144 11 219 162 10 230 174 11
45457-234 174 13 230 174 11 219 162 10 210 150 10
45458-192 133 9 163 110 8 124 112 88 82 82 82
45459- 50 50 50 30 30 30 14 14 14 6 6 6
45460- 0 0 0 0 0 0 0 0 0 0 0 0
45461- 0 0 0 0 0 0 0 0 0 0 0 0
45462- 0 0 0 0 0 0 0 0 0 0 0 0
45463- 0 0 0 0 0 0 0 0 0 0 0 0
45464- 0 0 0 0 0 0 0 0 0 0 0 0
45465- 6 6 6 14 14 14 22 22 22 34 34 34
45466- 42 42 42 58 58 58 74 74 74 86 86 86
45467-101 98 89 122 102 70 130 98 46 121 87 25
45468-137 92 6 152 99 6 163 110 8 180 123 7
45469-185 133 11 197 138 11 206 145 10 200 144 11
45470-180 123 7 156 107 11 130 83 6 104 69 6
45471- 50 34 6 54 54 54 110 110 110 101 98 89
45472- 86 86 86 82 82 82 78 78 78 78 78 78
45473- 78 78 78 78 78 78 78 78 78 78 78 78
45474- 78 78 78 82 82 82 86 86 86 94 94 94
45475-106 106 106 101 101 101 86 66 34 124 80 6
45476-156 107 11 180 123 7 192 133 9 200 144 11
45477-206 145 10 200 144 11 192 133 9 175 118 6
45478-139 102 15 109 106 95 70 70 70 42 42 42
45479- 22 22 22 10 10 10 0 0 0 0 0 0
45480- 0 0 0 0 0 0 0 0 0 0 0 0
45481- 0 0 0 0 0 0 0 0 0 0 0 0
45482- 0 0 0 0 0 0 0 0 0 0 0 0
45483- 0 0 0 0 0 0 0 0 0 0 0 0
45484- 0 0 0 0 0 0 0 0 0 0 0 0
45485- 0 0 0 0 0 0 6 6 6 10 10 10
45486- 14 14 14 22 22 22 30 30 30 38 38 38
45487- 50 50 50 62 62 62 74 74 74 90 90 90
45488-101 98 89 112 100 78 121 87 25 124 80 6
45489-137 92 6 152 99 6 152 99 6 152 99 6
45490-138 86 6 124 80 6 98 70 6 86 66 30
45491-101 98 89 82 82 82 58 58 58 46 46 46
45492- 38 38 38 34 34 34 34 34 34 34 34 34
45493- 34 34 34 34 34 34 34 34 34 34 34 34
45494- 34 34 34 34 34 34 38 38 38 42 42 42
45495- 54 54 54 82 82 82 94 86 76 91 60 6
45496-134 86 6 156 107 11 167 114 7 175 118 6
45497-175 118 6 167 114 7 152 99 6 121 87 25
45498-101 98 89 62 62 62 34 34 34 18 18 18
45499- 6 6 6 0 0 0 0 0 0 0 0 0
45500- 0 0 0 0 0 0 0 0 0 0 0 0
45501- 0 0 0 0 0 0 0 0 0 0 0 0
45502- 0 0 0 0 0 0 0 0 0 0 0 0
45503- 0 0 0 0 0 0 0 0 0 0 0 0
45504- 0 0 0 0 0 0 0 0 0 0 0 0
45505- 0 0 0 0 0 0 0 0 0 0 0 0
45506- 0 0 0 6 6 6 6 6 6 10 10 10
45507- 18 18 18 22 22 22 30 30 30 42 42 42
45508- 50 50 50 66 66 66 86 86 86 101 98 89
45509-106 86 58 98 70 6 104 69 6 104 69 6
45510-104 69 6 91 60 6 82 62 34 90 90 90
45511- 62 62 62 38 38 38 22 22 22 14 14 14
45512- 10 10 10 10 10 10 10 10 10 10 10 10
45513- 10 10 10 10 10 10 6 6 6 10 10 10
45514- 10 10 10 10 10 10 10 10 10 14 14 14
45515- 22 22 22 42 42 42 70 70 70 89 81 66
45516- 80 54 7 104 69 6 124 80 6 137 92 6
45517-134 86 6 116 81 8 100 82 52 86 86 86
45518- 58 58 58 30 30 30 14 14 14 6 6 6
45519- 0 0 0 0 0 0 0 0 0 0 0 0
45520- 0 0 0 0 0 0 0 0 0 0 0 0
45521- 0 0 0 0 0 0 0 0 0 0 0 0
45522- 0 0 0 0 0 0 0 0 0 0 0 0
45523- 0 0 0 0 0 0 0 0 0 0 0 0
45524- 0 0 0 0 0 0 0 0 0 0 0 0
45525- 0 0 0 0 0 0 0 0 0 0 0 0
45526- 0 0 0 0 0 0 0 0 0 0 0 0
45527- 0 0 0 6 6 6 10 10 10 14 14 14
45528- 18 18 18 26 26 26 38 38 38 54 54 54
45529- 70 70 70 86 86 86 94 86 76 89 81 66
45530- 89 81 66 86 86 86 74 74 74 50 50 50
45531- 30 30 30 14 14 14 6 6 6 0 0 0
45532- 0 0 0 0 0 0 0 0 0 0 0 0
45533- 0 0 0 0 0 0 0 0 0 0 0 0
45534- 0 0 0 0 0 0 0 0 0 0 0 0
45535- 6 6 6 18 18 18 34 34 34 58 58 58
45536- 82 82 82 89 81 66 89 81 66 89 81 66
45537- 94 86 66 94 86 76 74 74 74 50 50 50
45538- 26 26 26 14 14 14 6 6 6 0 0 0
45539- 0 0 0 0 0 0 0 0 0 0 0 0
45540- 0 0 0 0 0 0 0 0 0 0 0 0
45541- 0 0 0 0 0 0 0 0 0 0 0 0
45542- 0 0 0 0 0 0 0 0 0 0 0 0
45543- 0 0 0 0 0 0 0 0 0 0 0 0
45544- 0 0 0 0 0 0 0 0 0 0 0 0
45545- 0 0 0 0 0 0 0 0 0 0 0 0
45546- 0 0 0 0 0 0 0 0 0 0 0 0
45547- 0 0 0 0 0 0 0 0 0 0 0 0
45548- 6 6 6 6 6 6 14 14 14 18 18 18
45549- 30 30 30 38 38 38 46 46 46 54 54 54
45550- 50 50 50 42 42 42 30 30 30 18 18 18
45551- 10 10 10 0 0 0 0 0 0 0 0 0
45552- 0 0 0 0 0 0 0 0 0 0 0 0
45553- 0 0 0 0 0 0 0 0 0 0 0 0
45554- 0 0 0 0 0 0 0 0 0 0 0 0
45555- 0 0 0 6 6 6 14 14 14 26 26 26
45556- 38 38 38 50 50 50 58 58 58 58 58 58
45557- 54 54 54 42 42 42 30 30 30 18 18 18
45558- 10 10 10 0 0 0 0 0 0 0 0 0
45559- 0 0 0 0 0 0 0 0 0 0 0 0
45560- 0 0 0 0 0 0 0 0 0 0 0 0
45561- 0 0 0 0 0 0 0 0 0 0 0 0
45562- 0 0 0 0 0 0 0 0 0 0 0 0
45563- 0 0 0 0 0 0 0 0 0 0 0 0
45564- 0 0 0 0 0 0 0 0 0 0 0 0
45565- 0 0 0 0 0 0 0 0 0 0 0 0
45566- 0 0 0 0 0 0 0 0 0 0 0 0
45567- 0 0 0 0 0 0 0 0 0 0 0 0
45568- 0 0 0 0 0 0 0 0 0 6 6 6
45569- 6 6 6 10 10 10 14 14 14 18 18 18
45570- 18 18 18 14 14 14 10 10 10 6 6 6
45571- 0 0 0 0 0 0 0 0 0 0 0 0
45572- 0 0 0 0 0 0 0 0 0 0 0 0
45573- 0 0 0 0 0 0 0 0 0 0 0 0
45574- 0 0 0 0 0 0 0 0 0 0 0 0
45575- 0 0 0 0 0 0 0 0 0 6 6 6
45576- 14 14 14 18 18 18 22 22 22 22 22 22
45577- 18 18 18 14 14 14 10 10 10 6 6 6
45578- 0 0 0 0 0 0 0 0 0 0 0 0
45579- 0 0 0 0 0 0 0 0 0 0 0 0
45580- 0 0 0 0 0 0 0 0 0 0 0 0
45581- 0 0 0 0 0 0 0 0 0 0 0 0
45582- 0 0 0 0 0 0 0 0 0 0 0 0
45583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45584+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45585+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45586+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45587+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45588+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45589+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45591+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45592+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45593+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45594+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45596+4 4 4 4 4 4
45597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45598+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45599+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45600+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45601+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45602+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45603+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45605+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45606+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45607+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45608+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45610+4 4 4 4 4 4
45611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45612+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45613+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45614+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45616+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45617+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45618+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45619+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45620+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45621+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45624+4 4 4 4 4 4
45625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45626+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45627+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45628+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45629+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45630+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45631+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45633+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45634+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45635+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45638+4 4 4 4 4 4
45639+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45640+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45641+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45642+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45643+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45644+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45645+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45647+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45648+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45649+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45652+4 4 4 4 4 4
45653+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45654+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45655+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45658+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45659+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45661+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45662+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45666+4 4 4 4 4 4
45667+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45668+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45669+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45671+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
45672+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
45673+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45675+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45676+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
45677+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45678+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
45679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45680+4 4 4 4 4 4
45681+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45682+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45685+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
45686+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
45687+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45689+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45690+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
45691+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
45692+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
45693+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45694+4 4 4 4 4 4
45695+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45696+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45699+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
45700+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
45701+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45702+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45703+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45704+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
45705+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
45706+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
45707+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
45708+4 4 4 4 4 4
45709+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45710+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45712+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
45713+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
45714+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
45715+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
45716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45717+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45718+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
45719+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
45720+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
45721+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
45722+4 4 4 4 4 4
45723+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45726+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
45727+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
45728+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
45729+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
45730+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45731+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
45732+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
45733+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
45734+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
45735+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
45736+4 4 4 4 4 4
45737+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45740+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
45741+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
45742+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
45743+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
45744+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45745+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
45746+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
45747+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
45748+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
45749+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
45750+4 4 4 4 4 4
45751+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45753+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
45754+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
45755+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
45756+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
45757+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
45758+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
45759+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
45760+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
45761+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
45762+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
45763+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
45764+4 4 4 4 4 4
45765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45767+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
45768+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
45769+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
45770+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
45771+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
45772+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
45773+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
45774+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
45775+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
45776+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
45777+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
45778+4 4 4 4 4 4
45779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45781+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
45782+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
45783+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
45784+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
45785+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
45786+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
45787+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
45788+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
45789+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
45790+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
45791+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
45792+4 4 4 4 4 4
45793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45795+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
45796+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
45797+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
45798+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
45799+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
45800+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
45801+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
45802+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
45803+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
45804+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
45805+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
45806+4 4 4 4 4 4
45807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45808+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
45809+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
45810+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
45811+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
45812+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
45813+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
45814+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
45815+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
45816+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
45817+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
45818+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
45819+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
45820+4 4 4 4 4 4
45821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45822+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
45823+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
45824+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
45825+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45826+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
45827+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
45828+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
45829+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
45830+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
45831+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
45832+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
45833+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
45834+0 0 0 4 4 4
45835+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
45836+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
45837+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
45838+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
45839+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
45840+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
45841+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
45842+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
45843+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
45844+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
45845+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
45846+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
45847+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
45848+2 0 0 0 0 0
45849+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
45850+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
45851+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
45852+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
45853+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
45854+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
45855+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
45856+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
45857+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
45858+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
45859+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
45860+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
45861+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
45862+37 38 37 0 0 0
45863+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45864+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
45865+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
45866+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
45867+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
45868+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
45869+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
45870+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
45871+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
45872+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
45873+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
45874+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
45875+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
45876+85 115 134 4 0 0
45877+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
45878+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
45879+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
45880+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
45881+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
45882+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
45883+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
45884+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
45885+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
45886+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
45887+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
45888+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
45889+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
45890+60 73 81 4 0 0
45891+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
45892+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
45893+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
45894+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
45895+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
45896+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
45897+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
45898+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
45899+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
45900+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
45901+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
45902+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
45903+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
45904+16 19 21 4 0 0
45905+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
45906+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
45907+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
45908+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
45909+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
45910+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
45911+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
45912+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
45913+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
45914+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
45915+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
45916+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
45917+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
45918+4 0 0 4 3 3
45919+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
45920+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
45921+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
45922+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
45923+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
45924+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
45925+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
45926+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
45927+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
45928+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
45929+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
45930+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
45931+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
45932+3 2 2 4 4 4
45933+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
45934+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
45935+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
45936+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45937+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
45938+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
45939+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
45940+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
45941+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
45942+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
45943+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
45944+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
45945+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
45946+4 4 4 4 4 4
45947+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
45948+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
45949+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
45950+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
45951+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
45952+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
45953+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
45954+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
45955+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
45956+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
45957+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
45958+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
45959+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
45960+4 4 4 4 4 4
45961+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
45962+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
45963+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
45964+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
45965+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
45966+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45967+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
45968+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
45969+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
45970+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
45971+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
45972+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
45973+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
45974+5 5 5 5 5 5
45975+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
45976+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
45977+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
45978+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
45979+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
45980+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45981+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
45982+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
45983+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
45984+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
45985+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
45986+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
45987+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45988+5 5 5 4 4 4
45989+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
45990+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
45991+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
45992+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
45993+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45994+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
45995+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
45996+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
45997+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
45998+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
45999+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
46000+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
46001+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46002+4 4 4 4 4 4
46003+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
46004+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
46005+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
46006+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
46007+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
46008+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46009+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46010+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
46011+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
46012+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
46013+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
46014+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
46015+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46016+4 4 4 4 4 4
46017+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
46018+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
46019+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
46020+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
46021+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
46022+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
46023+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
46024+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
46025+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
46026+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
46027+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
46028+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46029+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46030+4 4 4 4 4 4
46031+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
46032+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
46033+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
46034+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
46035+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
46036+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46037+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
46038+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
46039+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
46040+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
46041+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
46042+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46043+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46044+4 4 4 4 4 4
46045+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
46046+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
46047+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
46048+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
46049+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
46050+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
46051+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
46052+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
46053+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
46054+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
46055+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46056+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46058+4 4 4 4 4 4
46059+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
46060+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
46061+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
46062+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
46063+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
46064+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
46065+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
46066+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
46067+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
46068+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
46069+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
46070+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46071+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46072+4 4 4 4 4 4
46073+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
46074+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
46075+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
46076+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
46077+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
46078+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
46079+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
46080+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
46081+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
46082+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
46083+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
46084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46085+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46086+4 4 4 4 4 4
46087+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
46088+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
46089+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
46090+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
46091+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
46092+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
46093+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
46094+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
46095+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
46096+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
46097+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46098+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46099+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46100+4 4 4 4 4 4
46101+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
46102+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
46103+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
46104+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
46105+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46106+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
46107+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
46108+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
46109+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
46110+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
46111+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46112+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46113+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46114+4 4 4 4 4 4
46115+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
46116+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
46117+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
46118+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
46119+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46120+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
46121+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
46122+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
46123+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
46124+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
46125+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46126+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46127+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46128+4 4 4 4 4 4
46129+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
46130+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
46131+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
46132+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
46133+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46134+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
46135+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
46136+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
46137+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
46138+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46139+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46140+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46141+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46142+4 4 4 4 4 4
46143+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
46144+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
46145+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
46146+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
46147+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
46148+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
46149+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
46150+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
46151+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46152+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46153+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46154+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46155+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46156+4 4 4 4 4 4
46157+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
46158+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
46159+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
46160+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
46161+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
46162+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
46163+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
46164+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
46165+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
46166+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46167+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46168+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46169+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46170+4 4 4 4 4 4
46171+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
46172+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
46173+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
46174+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
46175+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
46176+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
46177+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
46178+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
46179+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
46180+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
46181+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
46182+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46184+4 4 4 4 4 4
46185+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
46186+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
46187+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
[several hundred further lines of raw "R G B" pixel triplets omitted here: the tail of a bitmap image added by this patch (evidently a 224-colour logo, with the rows trailing off into the uniform "4 4 4" background value)]
46703diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
46704index fe92eed..106e085 100644
46705--- a/drivers/video/mb862xx/mb862xxfb_accel.c
46706+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
46707@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
46708 struct mb862xxfb_par *par = info->par;
46709
46710 if (info->var.bits_per_pixel == 32) {
46711- info->fbops->fb_fillrect = cfb_fillrect;
46712- info->fbops->fb_copyarea = cfb_copyarea;
46713- info->fbops->fb_imageblit = cfb_imageblit;
46714+ pax_open_kernel();
46715+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
46716+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
46717+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
46718+ pax_close_kernel();
46719 } else {
46720 outreg(disp, GC_L0EM, 3);
46721- info->fbops->fb_fillrect = mb86290fb_fillrect;
46722- info->fbops->fb_copyarea = mb86290fb_copyarea;
46723- info->fbops->fb_imageblit = mb86290fb_imageblit;
46724+ pax_open_kernel();
46725+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
46726+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
46727+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
46728+ pax_close_kernel();
46729 }
46730 outreg(draw, GDC_REG_DRAW_BASE, 0);
46731 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
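
The mb862xx hunk above is the first of a series in this patch (the nvidia, s1d13xxxfb, smscufx, udlfb, uvesafb and vesafb hunks below repeat it) applying the same PaX/KERNEXEC idiom: structures made up of function pointers, such as the fb_ops table, are turned read-only at runtime by the constify plugin, so a driver that swaps handlers must bracket the stores with pax_open_kernel()/pax_close_kernel() and write through a *(void **)& cast to get past the const qualifier. A minimal sketch of the idiom, assuming a PaX-patched kernel that provides the two helpers:

#include <linux/fb.h>

/* Sketch only: pax_open_kernel()/pax_close_kernel() exist on PaX
 * kernels and temporarily lift the write protection on otherwise
 * read-only kernel data. */
static void fbops_use_cfb(struct fb_info *info)
{
	pax_open_kernel();	/* make the constified ops writable */
	*(void **)&info->fbops->fb_fillrect  = cfb_fillrect;
	*(void **)&info->fbops->fb_copyarea  = cfb_copyarea;
	*(void **)&info->fbops->fb_imageblit = cfb_imageblit;
	pax_close_kernel();	/* restore the protection */
}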
46732diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
46733index ff22871..b129bed 100644
46734--- a/drivers/video/nvidia/nvidia.c
46735+++ b/drivers/video/nvidia/nvidia.c
46736@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
46737 info->fix.line_length = (info->var.xres_virtual *
46738 info->var.bits_per_pixel) >> 3;
46739 if (info->var.accel_flags) {
46740- info->fbops->fb_imageblit = nvidiafb_imageblit;
46741- info->fbops->fb_fillrect = nvidiafb_fillrect;
46742- info->fbops->fb_copyarea = nvidiafb_copyarea;
46743- info->fbops->fb_sync = nvidiafb_sync;
46744+ pax_open_kernel();
46745+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
46746+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
46747+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
46748+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
46749+ pax_close_kernel();
46750 info->pixmap.scan_align = 4;
46751 info->flags &= ~FBINFO_HWACCEL_DISABLED;
46752 info->flags |= FBINFO_READS_FAST;
46753 NVResetGraphics(info);
46754 } else {
46755- info->fbops->fb_imageblit = cfb_imageblit;
46756- info->fbops->fb_fillrect = cfb_fillrect;
46757- info->fbops->fb_copyarea = cfb_copyarea;
46758- info->fbops->fb_sync = NULL;
46759+ pax_open_kernel();
46760+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
46761+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
46762+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
46763+ *(void **)&info->fbops->fb_sync = NULL;
46764+ pax_close_kernel();
46765 info->pixmap.scan_align = 1;
46766 info->flags |= FBINFO_HWACCEL_DISABLED;
46767 info->flags &= ~FBINFO_READS_FAST;
46768@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
46769 info->pixmap.size = 8 * 1024;
46770 info->pixmap.flags = FB_PIXMAP_SYSTEM;
46771
46772- if (!hwcur)
46773- info->fbops->fb_cursor = NULL;
46774+ if (!hwcur) {
46775+ pax_open_kernel();
46776+ *(void **)&info->fbops->fb_cursor = NULL;
46777+ pax_close_kernel();
46778+ }
46779
46780 info->var.accel_flags = (!noaccel);
46781
46782diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
46783index 76d9053..dec2bfd 100644
46784--- a/drivers/video/s1d13xxxfb.c
46785+++ b/drivers/video/s1d13xxxfb.c
46786@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
46787
46788 switch(prod_id) {
46789 case S1D13506_PROD_ID: /* activate acceleration */
46790- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
46791- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
46792+ pax_open_kernel();
46793+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
46794+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
46795+ pax_close_kernel();
46796 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
46797 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
46798 break;
46799diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
46800index 97bd662..39fab85 100644
46801--- a/drivers/video/smscufx.c
46802+++ b/drivers/video/smscufx.c
46803@@ -1171,7 +1171,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
46804 fb_deferred_io_cleanup(info);
46805 kfree(info->fbdefio);
46806 info->fbdefio = NULL;
46807- info->fbops->fb_mmap = ufx_ops_mmap;
46808+ pax_open_kernel();
46809+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
46810+ pax_close_kernel();
46811 }
46812
46813 pr_debug("released /dev/fb%d user=%d count=%d",
46814diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
46815index 86d449e..8e04dc5 100644
46816--- a/drivers/video/udlfb.c
46817+++ b/drivers/video/udlfb.c
46818@@ -619,11 +619,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
46819 dlfb_urb_completion(urb);
46820
46821 error:
46822- atomic_add(bytes_sent, &dev->bytes_sent);
46823- atomic_add(bytes_identical, &dev->bytes_identical);
46824- atomic_add(width*height*2, &dev->bytes_rendered);
46825+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
46826+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
46827+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
46828 end_cycles = get_cycles();
46829- atomic_add(((unsigned int) ((end_cycles - start_cycles)
46830+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
46831 >> 10)), /* Kcycles */
46832 &dev->cpu_kcycles_used);
46833
46834@@ -744,11 +744,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
46835 dlfb_urb_completion(urb);
46836
46837 error:
46838- atomic_add(bytes_sent, &dev->bytes_sent);
46839- atomic_add(bytes_identical, &dev->bytes_identical);
46840- atomic_add(bytes_rendered, &dev->bytes_rendered);
46841+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
46842+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
46843+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
46844 end_cycles = get_cycles();
46845- atomic_add(((unsigned int) ((end_cycles - start_cycles)
46846+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
46847 >> 10)), /* Kcycles */
46848 &dev->cpu_kcycles_used);
46849 }
46850@@ -989,7 +989,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
46851 fb_deferred_io_cleanup(info);
46852 kfree(info->fbdefio);
46853 info->fbdefio = NULL;
46854- info->fbops->fb_mmap = dlfb_ops_mmap;
46855+ pax_open_kernel();
46856+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
46857+ pax_close_kernel();
46858 }
46859
46860 pr_warn("released /dev/fb%d user=%d count=%d\n",
46861@@ -1372,7 +1374,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
46862 struct fb_info *fb_info = dev_get_drvdata(fbdev);
46863 struct dlfb_data *dev = fb_info->par;
46864 return snprintf(buf, PAGE_SIZE, "%u\n",
46865- atomic_read(&dev->bytes_rendered));
46866+ atomic_read_unchecked(&dev->bytes_rendered));
46867 }
46868
46869 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
46870@@ -1380,7 +1382,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
46871 struct fb_info *fb_info = dev_get_drvdata(fbdev);
46872 struct dlfb_data *dev = fb_info->par;
46873 return snprintf(buf, PAGE_SIZE, "%u\n",
46874- atomic_read(&dev->bytes_identical));
46875+ atomic_read_unchecked(&dev->bytes_identical));
46876 }
46877
46878 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
46879@@ -1388,7 +1390,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
46880 struct fb_info *fb_info = dev_get_drvdata(fbdev);
46881 struct dlfb_data *dev = fb_info->par;
46882 return snprintf(buf, PAGE_SIZE, "%u\n",
46883- atomic_read(&dev->bytes_sent));
46884+ atomic_read_unchecked(&dev->bytes_sent));
46885 }
46886
46887 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
46888@@ -1396,7 +1398,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
46889 struct fb_info *fb_info = dev_get_drvdata(fbdev);
46890 struct dlfb_data *dev = fb_info->par;
46891 return snprintf(buf, PAGE_SIZE, "%u\n",
46892- atomic_read(&dev->cpu_kcycles_used));
46893+ atomic_read_unchecked(&dev->cpu_kcycles_used));
46894 }
46895
46896 static ssize_t edid_show(
46897@@ -1456,10 +1458,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
46898 struct fb_info *fb_info = dev_get_drvdata(fbdev);
46899 struct dlfb_data *dev = fb_info->par;
46900
46901- atomic_set(&dev->bytes_rendered, 0);
46902- atomic_set(&dev->bytes_identical, 0);
46903- atomic_set(&dev->bytes_sent, 0);
46904- atomic_set(&dev->cpu_kcycles_used, 0);
46905+ atomic_set_unchecked(&dev->bytes_rendered, 0);
46906+ atomic_set_unchecked(&dev->bytes_identical, 0);
46907+ atomic_set_unchecked(&dev->bytes_sent, 0);
46908+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
46909
46910 return count;
46911 }
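
The udlfb hunks above convert pure statistics counters from atomic_t to the PaX atomic_unchecked_t family. Under PaX's REFCOUNT hardening every ordinary atomic_t is checked for overflow so that reference-count wraps are caught; counters such as bytes_sent or cpu_kcycles_used may legitimately wrap without harm, so they are switched to the _unchecked variants to opt out of that detection. A hedged sketch (the unchecked type and helpers exist only on PaX-patched kernels):

#include <linux/atomic.h>

/* A statistics counter exempted from REFCOUNT overflow detection;
 * wrapping here is harmless, unlike a refcount wrap. */
struct xfer_stats {
	atomic_unchecked_t bytes_sent;
};

static void xfer_account(struct xfer_stats *s, unsigned int n)
{
	atomic_add_unchecked(n, &s->bytes_sent);
}

static unsigned int xfer_total(struct xfer_stats *s)
{
	return atomic_read_unchecked(&s->bytes_sent);
}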
46912diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
46913index b75db01..ad2f34a 100644
46914--- a/drivers/video/uvesafb.c
46915+++ b/drivers/video/uvesafb.c
46916@@ -19,6 +19,7 @@
46917 #include <linux/io.h>
46918 #include <linux/mutex.h>
46919 #include <linux/slab.h>
46920+#include <linux/moduleloader.h>
46921 #include <video/edid.h>
46922 #include <video/uvesafb.h>
46923 #ifdef CONFIG_X86
46924@@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
46925 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
46926 par->pmi_setpal = par->ypan = 0;
46927 } else {
46928+
46929+#ifdef CONFIG_PAX_KERNEXEC
46930+#ifdef CONFIG_MODULES
46931+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
46932+#endif
46933+ if (!par->pmi_code) {
46934+ par->pmi_setpal = par->ypan = 0;
46935+ return 0;
46936+ }
46937+#endif
46938+
46939 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
46940 + task->t.regs.edi);
46941+
46942+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46943+ pax_open_kernel();
46944+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
46945+ pax_close_kernel();
46946+
46947+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
46948+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
46949+#else
46950 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
46951 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
46952+#endif
46953+
46954 printk(KERN_INFO "uvesafb: protected mode interface info at "
46955 "%04x:%04x\n",
46956 (u16)task->t.regs.es, (u16)task->t.regs.edi);
46957@@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
46958 par->ypan = ypan;
46959
46960 if (par->pmi_setpal || par->ypan) {
46961+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
46962 if (__supported_pte_mask & _PAGE_NX) {
46963 par->pmi_setpal = par->ypan = 0;
46964 printk(KERN_WARNING "uvesafb: NX protection is actively."
46965 "We have better not to use the PMI.\n");
46966- } else {
46967+ } else
46968+#endif
46969 uvesafb_vbe_getpmi(task, par);
46970- }
46971 }
46972 #else
46973 /* The protected mode interface is not available on non-x86. */
46974@@ -1457,8 +1481,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
46975 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
46976
46977 /* Disable blanking if the user requested so. */
46978- if (!blank)
46979- info->fbops->fb_blank = NULL;
46980+ if (!blank) {
46981+ pax_open_kernel();
46982+ *(void **)&info->fbops->fb_blank = NULL;
46983+ pax_close_kernel();
46984+ }
46985
46986 /*
46987 * Find out how much IO memory is required for the mode with
46988@@ -1534,8 +1561,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
46989 info->flags = FBINFO_FLAG_DEFAULT |
46990 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
46991
46992- if (!par->ypan)
46993- info->fbops->fb_pan_display = NULL;
46994+ if (!par->ypan) {
46995+ pax_open_kernel();
46996+ *(void **)&info->fbops->fb_pan_display = NULL;
46997+ pax_close_kernel();
46998+ }
46999 }
47000
47001 static void uvesafb_init_mtrr(struct fb_info *info)
47002@@ -1836,6 +1866,11 @@ out:
47003 if (par->vbe_modes)
47004 kfree(par->vbe_modes);
47005
47006+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47007+ if (par->pmi_code)
47008+ module_free_exec(NULL, par->pmi_code);
47009+#endif
47010+
47011 framebuffer_release(info);
47012 return err;
47013 }
47014@@ -1862,6 +1897,12 @@ static int uvesafb_remove(struct platform_device *dev)
47015 kfree(par->vbe_state_orig);
47016 if (par->vbe_state_saved)
47017 kfree(par->vbe_state_saved);
47018+
47019+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47020+ if (par->pmi_code)
47021+ module_free_exec(NULL, par->pmi_code);
47022+#endif
47023+
47024 }
47025
47026 framebuffer_release(info);
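
With KERNEXEC active, the BIOS-provided protected-mode interface (PMI) code can no longer be executed in place from writable memory, so the uvesafb hunk above relocates it: module_alloc_exec() (a PaX addition, hence the new moduleloader.h include) supplies executable kernel memory, the BIOS bytes are copied in under pax_open_kernel(), the entry points are translated with ktva_ktla() before being stored, and module_free_exec() releases the area on the error and removal paths. A sketch under those assumptions, with names borrowed from the hunk:

#include <linux/moduleloader.h>
#include <linux/string.h>

/* Sketch, assuming the PaX helpers module_alloc_exec(),
 * module_free_exec(), ktva_ktla() and pax_open/close_kernel();
 * pmi_base points at the BIOS PMI table, pmi_len at its size. */
static u8 *relocate_pmi(const u16 *pmi_base, size_t pmi_len,
			u8 **start, u8 **pal)
{
	u8 *pmi_code = module_alloc_exec(pmi_len);

	if (!pmi_code)
		return NULL;		/* caller falls back to no PMI */

	pax_open_kernel();
	memcpy(pmi_code, pmi_base, pmi_len);	/* copy the BIOS code */
	pax_close_kernel();

	*start = ktva_ktla(pmi_code + pmi_base[1]);
	*pal   = ktva_ktla(pmi_code + pmi_base[2]);
	return pmi_code;	/* free with module_free_exec() later */
}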
47027diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
47028index 501b340..d80aa17 100644
47029--- a/drivers/video/vesafb.c
47030+++ b/drivers/video/vesafb.c
47031@@ -9,6 +9,7 @@
47032 */
47033
47034 #include <linux/module.h>
47035+#include <linux/moduleloader.h>
47036 #include <linux/kernel.h>
47037 #include <linux/errno.h>
47038 #include <linux/string.h>
47039@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
47040 static int vram_total __initdata; /* Set total amount of memory */
47041 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
47042 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
47043-static void (*pmi_start)(void) __read_mostly;
47044-static void (*pmi_pal) (void) __read_mostly;
47045+static void (*pmi_start)(void) __read_only;
47046+static void (*pmi_pal) (void) __read_only;
47047 static int depth __read_mostly;
47048 static int vga_compat __read_mostly;
47049 /* --------------------------------------------------------------------- */
47050@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
47051 unsigned int size_vmode;
47052 unsigned int size_remap;
47053 unsigned int size_total;
47054+ void *pmi_code = NULL;
47055
47056 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
47057 return -ENODEV;
47058@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
47059 size_remap = size_total;
47060 vesafb_fix.smem_len = size_remap;
47061
47062-#ifndef __i386__
47063- screen_info.vesapm_seg = 0;
47064-#endif
47065-
47066 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
47067 printk(KERN_WARNING
47068 "vesafb: cannot reserve video memory at 0x%lx\n",
47069@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
47070 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
47071 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
47072
47073+#ifdef __i386__
47074+
47075+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47076+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
47077+ if (!pmi_code)
47078+#elif !defined(CONFIG_PAX_KERNEXEC)
47079+ if (0)
47080+#endif
47081+
47082+#endif
47083+ screen_info.vesapm_seg = 0;
47084+
47085 if (screen_info.vesapm_seg) {
47086- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
47087- screen_info.vesapm_seg,screen_info.vesapm_off);
47088+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
47089+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
47090 }
47091
47092 if (screen_info.vesapm_seg < 0xc000)
47093@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
47094
47095 if (ypan || pmi_setpal) {
47096 unsigned short *pmi_base;
47097+
47098 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
47099- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
47100- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
47101+
47102+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47103+ pax_open_kernel();
47104+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
47105+#else
47106+ pmi_code = pmi_base;
47107+#endif
47108+
47109+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
47110+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
47111+
47112+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47113+ pmi_start = ktva_ktla(pmi_start);
47114+ pmi_pal = ktva_ktla(pmi_pal);
47115+ pax_close_kernel();
47116+#endif
47117+
47118 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
47119 if (pmi_base[3]) {
47120 printk(KERN_INFO "vesafb: pmi: ports = ");
47121@@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
47122 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
47123 (ypan ? FBINFO_HWACCEL_YPAN : 0);
47124
47125- if (!ypan)
47126- info->fbops->fb_pan_display = NULL;
47127+ if (!ypan) {
47128+ pax_open_kernel();
47129+ *(void **)&info->fbops->fb_pan_display = NULL;
47130+ pax_close_kernel();
47131+ }
47132
47133 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
47134 err = -ENOMEM;
47135@@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
47136 info->node, info->fix.id);
47137 return 0;
47138 err:
47139+
47140+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
47141+ module_free_exec(NULL, pmi_code);
47142+#endif
47143+
47144 if (info->screen_base)
47145 iounmap(info->screen_base);
47146 framebuffer_release(info);
47147diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
47148index 88714ae..16c2e11 100644
47149--- a/drivers/video/via/via_clock.h
47150+++ b/drivers/video/via/via_clock.h
47151@@ -56,7 +56,7 @@ struct via_clock {
47152
47153 void (*set_engine_pll_state)(u8 state);
47154 void (*set_engine_pll)(struct via_pll_config config);
47155-};
47156+} __no_const;
47157
47158
47159 static inline u32 get_pll_internal_frequency(u32 ref_freq,
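
The __no_const annotation above is the escape hatch for the same constification: PaX's constify GCC plugin automatically turns structures consisting only of function pointers into const objects, and struct via_clock has its members assigned at runtime by the via driver, so it must be exempted. A sketch of what the annotation expresses (on a stock kernel __no_const would simply be defined away):

#include <linux/types.h>

/* Without __no_const the constify plugin would make every instance
 * of this ops-like struct read-only, breaking the driver's runtime
 * assignments to its members. */
struct sketch_clock_ops {
	void (*set_pll_state)(u8 state);
	void (*set_pll)(u32 config);
} __no_const;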
47160diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
47161index fef20db..d28b1ab 100644
47162--- a/drivers/xen/xenfs/xenstored.c
47163+++ b/drivers/xen/xenfs/xenstored.c
47164@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
47165 static int xsd_kva_open(struct inode *inode, struct file *file)
47166 {
47167 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
47168+#ifdef CONFIG_GRKERNSEC_HIDESYM
47169+ NULL);
47170+#else
47171 xen_store_interface);
47172+#endif
47173+
47174 if (!file->private_data)
47175 return -ENOMEM;
47176 return 0;
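
The xenstored change above is one instance of GRKERNSEC_HIDESYM, which keeps kernel addresses out of userland: the kva file's "%p" output is replaced by a formatted NULL. The autofs4 hunk further below applies the second HIDESYM idiom, substituting a monotonically increasing dummy token for a dentry pointer that would otherwise be written to userspace. A sketch of that token idiom (atomic_unchecked_t is again a PaX type):

#include <linux/kernel.h>

#ifdef CONFIG_GRKERNSEC_HIDESYM
static atomic_unchecked_t dummy_name_id = ATOMIC_INIT(0);
#endif

/* Build a per-request name without disclosing the object's kernel
 * address when HIDESYM is enabled. */
static int build_req_name(char *name, const void *obj)
{
#ifdef CONFIG_GRKERNSEC_HIDESYM
	return sprintf(name, "%08x",
		       atomic_inc_return_unchecked(&dummy_name_id));
#else
	return sprintf(name, "%p", obj);	/* leaks a kernel address */
#endif
}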
47177diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
47178index 890bed5..17ae73e 100644
47179--- a/fs/9p/vfs_inode.c
47180+++ b/fs/9p/vfs_inode.c
47181@@ -1329,7 +1329,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
47182 void
47183 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
47184 {
47185- char *s = nd_get_link(nd);
47186+ const char *s = nd_get_link(nd);
47187
47188 p9_debug(P9_DEBUG_VFS, " %s %s\n",
47189 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
47190diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
47191index 0efd152..b5802ad 100644
47192--- a/fs/Kconfig.binfmt
47193+++ b/fs/Kconfig.binfmt
47194@@ -89,7 +89,7 @@ config HAVE_AOUT
47195
47196 config BINFMT_AOUT
47197 tristate "Kernel support for a.out and ECOFF binaries"
47198- depends on HAVE_AOUT
47199+ depends on HAVE_AOUT && BROKEN
47200 ---help---
47201 A.out (Assembler.OUTput) is a set of formats for libraries and
47202 executables used in the earliest versions of UNIX. Linux used
47203diff --git a/fs/aio.c b/fs/aio.c
47204index 71f613c..9d01f1f 100644
47205--- a/fs/aio.c
47206+++ b/fs/aio.c
47207@@ -111,7 +111,7 @@ static int aio_setup_ring(struct kioctx *ctx)
47208 size += sizeof(struct io_event) * nr_events;
47209 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
47210
47211- if (nr_pages < 0)
47212+ if (nr_pages <= 0)
47213 return -EINVAL;
47214
47215 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
47216@@ -1373,18 +1373,19 @@ static ssize_t aio_fsync(struct kiocb *iocb)
47217 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
47218 {
47219 ssize_t ret;
47220+ struct iovec iovstack;
47221
47222 #ifdef CONFIG_COMPAT
47223 if (compat)
47224 ret = compat_rw_copy_check_uvector(type,
47225 (struct compat_iovec __user *)kiocb->ki_buf,
47226- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
47227+ kiocb->ki_nbytes, 1, &iovstack,
47228 &kiocb->ki_iovec);
47229 else
47230 #endif
47231 ret = rw_copy_check_uvector(type,
47232 (struct iovec __user *)kiocb->ki_buf,
47233- kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
47234+ kiocb->ki_nbytes, 1, &iovstack,
47235 &kiocb->ki_iovec);
47236 if (ret < 0)
47237 goto out;
47238@@ -1393,6 +1394,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
47239 if (ret < 0)
47240 goto out;
47241
47242+ if (kiocb->ki_iovec == &iovstack) {
47243+ kiocb->ki_inline_vec = iovstack;
47244+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
47245+ }
47246 kiocb->ki_nr_segs = kiocb->ki_nbytes;
47247 kiocb->ki_cur_seg = 0;
47248 /* ki_nbytes/left now reflect bytes instead of segs */
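
The aio change above is a validate-then-publish fix rather than a PaX annotation: previously the user-supplied iovec was checked directly into kiocb->ki_inline_vec, so a request failing later validation could still have clobbered state in the live kiocb. The patched code parses into a stack-local iovec and copies it into the kiocb only once every check has passed. A condensed sketch of the fixed flow (not the exact kernel function):

#include <linux/aio.h>
#include <linux/uio.h>

static ssize_t setup_vectored_rw(int type, struct kiocb *kiocb)
{
	struct iovec iovstack;		/* scratch copy on the stack */
	ssize_t ret;

	ret = rw_copy_check_uvector(type,
			(struct iovec __user *)kiocb->ki_buf,
			kiocb->ki_nbytes, 1, &iovstack, &kiocb->ki_iovec);
	if (ret < 0)
		return ret;

	/* Publish the validated copy into the long-lived object. */
	if (kiocb->ki_iovec == &iovstack) {
		kiocb->ki_inline_vec = iovstack;
		kiocb->ki_iovec = &kiocb->ki_inline_vec;
	}
	return ret;
}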
47249diff --git a/fs/attr.c b/fs/attr.c
47250index 1449adb..a2038c2 100644
47251--- a/fs/attr.c
47252+++ b/fs/attr.c
47253@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
47254 unsigned long limit;
47255
47256 limit = rlimit(RLIMIT_FSIZE);
47257+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
47258 if (limit != RLIM_INFINITY && offset > limit)
47259 goto out_sig;
47260 if (offset > inode->i_sb->s_maxbytes)
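
gr_learn_resource() above is grsecurity's resource-learning hook: when the RBAC system runs in learning mode, each call records how much of a given rlimit a task actually requested so that tight per-subject limits can be generated later; otherwise it is effectively a no-op. The patch plants it immediately before each rlimit comparison, always in the same shape (the exact meaning of the final flag is internal to grsecurity):

#include <linux/sched.h>

/* Sketch of the hook placement: record the request first, then run
 * the unchanged limit check. */
static int check_fsize(loff_t offset)
{
	unsigned long limit = rlimit(RLIMIT_FSIZE);

	gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
	if (limit != RLIM_INFINITY && offset > limit)
		return -EFBIG;
	return 0;
}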
47261diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
47262index 03bc1d3..6205356 100644
47263--- a/fs/autofs4/waitq.c
47264+++ b/fs/autofs4/waitq.c
47265@@ -61,7 +61,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
47266 {
47267 unsigned long sigpipe, flags;
47268 mm_segment_t fs;
47269- const char *data = (const char *)addr;
47270+ const char __user *data = (const char __force_user *)addr;
47271 ssize_t wr = 0;
47272
47273 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
47274@@ -348,6 +348,10 @@ static int validate_request(struct autofs_wait_queue **wait,
47275 return 1;
47276 }
47277
47278+#ifdef CONFIG_GRKERNSEC_HIDESYM
47279+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
47280+#endif
47281+
47282 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
47283 enum autofs_notify notify)
47284 {
47285@@ -381,7 +385,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
47286
47287 /* If this is a direct mount request create a dummy name */
47288 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
47289+#ifdef CONFIG_GRKERNSEC_HIDESYM
47290+ /* this name does get written to userland via autofs4_write() */
47291+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
47292+#else
47293 qstr.len = sprintf(name, "%p", dentry);
47294+#endif
47295 else {
47296 qstr.len = autofs4_getpath(sbi, dentry, &name);
47297 if (!qstr.len) {
47298diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
47299index 2b3bda8..6a2d4be 100644
47300--- a/fs/befs/linuxvfs.c
47301+++ b/fs/befs/linuxvfs.c
47302@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
47303 {
47304 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
47305 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
47306- char *link = nd_get_link(nd);
47307+ const char *link = nd_get_link(nd);
47308 if (!IS_ERR(link))
47309 kfree(link);
47310 }
47311diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
47312index 6043567..16a9239 100644
47313--- a/fs/binfmt_aout.c
47314+++ b/fs/binfmt_aout.c
47315@@ -16,6 +16,7 @@
47316 #include <linux/string.h>
47317 #include <linux/fs.h>
47318 #include <linux/file.h>
47319+#include <linux/security.h>
47320 #include <linux/stat.h>
47321 #include <linux/fcntl.h>
47322 #include <linux/ptrace.h>
47323@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
47324 #endif
47325 # define START_STACK(u) ((void __user *)u.start_stack)
47326
47327+ memset(&dump, 0, sizeof(dump));
47328+
47329 fs = get_fs();
47330 set_fs(KERNEL_DS);
47331 has_dumped = 1;
47332@@ -70,10 +73,12 @@ static int aout_core_dump(struct coredump_params *cprm)
47333
47334 /* If the size of the dump file exceeds the rlimit, then see what would happen
47335 if we wrote the stack, but not the data area. */
47336+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
47337 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
47338 dump.u_dsize = 0;
47339
47340 /* Make sure we have enough room to write the stack and data areas. */
47341+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
47342 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
47343 dump.u_ssize = 0;
47344
47345@@ -234,6 +239,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
47346 rlim = rlimit(RLIMIT_DATA);
47347 if (rlim >= RLIM_INFINITY)
47348 rlim = ~0;
47349+
47350+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
47351 if (ex.a_data + ex.a_bss > rlim)
47352 return -ENOMEM;
47353
47354@@ -268,6 +275,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
47355
47356 install_exec_creds(bprm);
47357
47358+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47359+ current->mm->pax_flags = 0UL;
47360+#endif
47361+
47362+#ifdef CONFIG_PAX_PAGEEXEC
47363+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
47364+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
47365+
47366+#ifdef CONFIG_PAX_EMUTRAMP
47367+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
47368+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
47369+#endif
47370+
47371+#ifdef CONFIG_PAX_MPROTECT
47372+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
47373+ current->mm->pax_flags |= MF_PAX_MPROTECT;
47374+#endif
47375+
47376+ }
47377+#endif
47378+
47379 if (N_MAGIC(ex) == OMAGIC) {
47380 unsigned long text_addr, map_size;
47381 loff_t pos;
47382@@ -333,7 +361,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
47383 }
47384
47385 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
47386- PROT_READ | PROT_WRITE | PROT_EXEC,
47387+ PROT_READ | PROT_WRITE,
47388 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
47389 fd_offset + ex.a_text);
47390 if (error != N_DATADDR(ex)) {
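
For a.out binaries the per-binary PaX flags travel in the exec header's N_FLAGS field, and the hunk above does two things: it seeds current->mm->pax_flags from those bits (PAGEEXEC and MPROTECT default on unless their F_PAX_* bit disables them, EMUTRAMP defaults off), and it drops PROT_EXEC from the data-segment mapping so the non-executable data segment is actually established. A reduced sketch of the flag translation, assuming the PaX F_PAX_*/MF_PAX_* definitions:

#include <linux/a.out.h>
#include <linux/mm_types.h>

/* Sketch: translate a.out header bits into mm PaX flags, mirroring
 * the nesting used in the hunk above. */
static void aout_set_pax_flags(struct mm_struct *mm, struct exec ex)
{
	mm->pax_flags = 0UL;

	if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {		/* default on */
		mm->pax_flags |= MF_PAX_PAGEEXEC;
		if (N_FLAGS(ex) & F_PAX_EMUTRAMP)	/* default off */
			mm->pax_flags |= MF_PAX_EMUTRAMP;
		if (!(N_FLAGS(ex) & F_PAX_MPROTECT))	/* default on */
			mm->pax_flags |= MF_PAX_MPROTECT;
	}
}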
47391diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
47392index 0c42cdb..f4be023 100644
47393--- a/fs/binfmt_elf.c
47394+++ b/fs/binfmt_elf.c
47395@@ -33,6 +33,7 @@
47396 #include <linux/elf.h>
47397 #include <linux/utsname.h>
47398 #include <linux/coredump.h>
47399+#include <linux/xattr.h>
47400 #include <asm/uaccess.h>
47401 #include <asm/param.h>
47402 #include <asm/page.h>
47403@@ -59,6 +60,10 @@ static int elf_core_dump(struct coredump_params *cprm);
47404 #define elf_core_dump NULL
47405 #endif
47406
47407+#ifdef CONFIG_PAX_MPROTECT
47408+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
47409+#endif
47410+
47411 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
47412 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
47413 #else
47414@@ -78,6 +83,11 @@ static struct linux_binfmt elf_format = {
47415 .load_binary = load_elf_binary,
47416 .load_shlib = load_elf_library,
47417 .core_dump = elf_core_dump,
47418+
47419+#ifdef CONFIG_PAX_MPROTECT
47420+ .handle_mprotect= elf_handle_mprotect,
47421+#endif
47422+
47423 .min_coredump = ELF_EXEC_PAGESIZE,
47424 };
47425
47426@@ -85,6 +95,8 @@ static struct linux_binfmt elf_format = {
47427
47428 static int set_brk(unsigned long start, unsigned long end)
47429 {
47430+ unsigned long e = end;
47431+
47432 start = ELF_PAGEALIGN(start);
47433 end = ELF_PAGEALIGN(end);
47434 if (end > start) {
47435@@ -93,7 +105,7 @@ static int set_brk(unsigned long start, unsigned long end)
47436 if (BAD_ADDR(addr))
47437 return addr;
47438 }
47439- current->mm->start_brk = current->mm->brk = end;
47440+ current->mm->start_brk = current->mm->brk = e;
47441 return 0;
47442 }
47443
47444@@ -154,12 +166,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
47445 elf_addr_t __user *u_rand_bytes;
47446 const char *k_platform = ELF_PLATFORM;
47447 const char *k_base_platform = ELF_BASE_PLATFORM;
47448- unsigned char k_rand_bytes[16];
47449+ u32 k_rand_bytes[4];
47450 int items;
47451 elf_addr_t *elf_info;
47452 int ei_index = 0;
47453 const struct cred *cred = current_cred();
47454 struct vm_area_struct *vma;
47455+ unsigned long saved_auxv[AT_VECTOR_SIZE];
47456
47457 /*
47458 * In some cases (e.g. Hyper-Threading), we want to avoid L1
47459@@ -201,8 +214,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
47460 * Generate 16 random bytes for userspace PRNG seeding.
47461 */
47462 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
47463- u_rand_bytes = (elf_addr_t __user *)
47464- STACK_ALLOC(p, sizeof(k_rand_bytes));
47465+ srandom32(k_rand_bytes[0] ^ random32());
47466+ srandom32(k_rand_bytes[1] ^ random32());
47467+ srandom32(k_rand_bytes[2] ^ random32());
47468+ srandom32(k_rand_bytes[3] ^ random32());
47469+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
47470+ u_rand_bytes = (elf_addr_t __user *) p;
47471 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
47472 return -EFAULT;
47473
47474@@ -314,9 +331,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
47475 return -EFAULT;
47476 current->mm->env_end = p;
47477
47478+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
47479+
47480 /* Put the elf_info on the stack in the right place. */
47481 sp = (elf_addr_t __user *)envp + 1;
47482- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
47483+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
47484 return -EFAULT;
47485 return 0;
47486 }
47487@@ -380,15 +399,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
47488 an ELF header */
47489
47490 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
47491- struct file *interpreter, unsigned long *interp_map_addr,
47492- unsigned long no_base)
47493+ struct file *interpreter, unsigned long no_base)
47494 {
47495 struct elf_phdr *elf_phdata;
47496 struct elf_phdr *eppnt;
47497- unsigned long load_addr = 0;
47498+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
47499 int load_addr_set = 0;
47500 unsigned long last_bss = 0, elf_bss = 0;
47501- unsigned long error = ~0UL;
47502+ unsigned long error = -EINVAL;
47503 unsigned long total_size;
47504 int retval, i, size;
47505
47506@@ -434,6 +452,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
47507 goto out_close;
47508 }
47509
47510+#ifdef CONFIG_PAX_SEGMEXEC
47511+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
47512+ pax_task_size = SEGMEXEC_TASK_SIZE;
47513+#endif
47514+
47515 eppnt = elf_phdata;
47516 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
47517 if (eppnt->p_type == PT_LOAD) {
47518@@ -457,8 +480,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
47519 map_addr = elf_map(interpreter, load_addr + vaddr,
47520 eppnt, elf_prot, elf_type, total_size);
47521 total_size = 0;
47522- if (!*interp_map_addr)
47523- *interp_map_addr = map_addr;
47524 error = map_addr;
47525 if (BAD_ADDR(map_addr))
47526 goto out_close;
47527@@ -477,8 +498,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
47528 k = load_addr + eppnt->p_vaddr;
47529 if (BAD_ADDR(k) ||
47530 eppnt->p_filesz > eppnt->p_memsz ||
47531- eppnt->p_memsz > TASK_SIZE ||
47532- TASK_SIZE - eppnt->p_memsz < k) {
47533+ eppnt->p_memsz > pax_task_size ||
47534+ pax_task_size - eppnt->p_memsz < k) {
47535 error = -ENOMEM;
47536 goto out_close;
47537 }
47538@@ -530,6 +551,315 @@ out:
47539 return error;
47540 }
47541
47542+#ifdef CONFIG_PAX_PT_PAX_FLAGS
47543+#ifdef CONFIG_PAX_SOFTMODE
47544+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
47545+{
47546+ unsigned long pax_flags = 0UL;
47547+
47548+#ifdef CONFIG_PAX_PAGEEXEC
47549+ if (elf_phdata->p_flags & PF_PAGEEXEC)
47550+ pax_flags |= MF_PAX_PAGEEXEC;
47551+#endif
47552+
47553+#ifdef CONFIG_PAX_SEGMEXEC
47554+ if (elf_phdata->p_flags & PF_SEGMEXEC)
47555+ pax_flags |= MF_PAX_SEGMEXEC;
47556+#endif
47557+
47558+#ifdef CONFIG_PAX_EMUTRAMP
47559+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
47560+ pax_flags |= MF_PAX_EMUTRAMP;
47561+#endif
47562+
47563+#ifdef CONFIG_PAX_MPROTECT
47564+ if (elf_phdata->p_flags & PF_MPROTECT)
47565+ pax_flags |= MF_PAX_MPROTECT;
47566+#endif
47567+
47568+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
47569+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
47570+ pax_flags |= MF_PAX_RANDMMAP;
47571+#endif
47572+
47573+ return pax_flags;
47574+}
47575+#endif
47576+
47577+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
47578+{
47579+ unsigned long pax_flags = 0UL;
47580+
47581+#ifdef CONFIG_PAX_PAGEEXEC
47582+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
47583+ pax_flags |= MF_PAX_PAGEEXEC;
47584+#endif
47585+
47586+#ifdef CONFIG_PAX_SEGMEXEC
47587+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
47588+ pax_flags |= MF_PAX_SEGMEXEC;
47589+#endif
47590+
47591+#ifdef CONFIG_PAX_EMUTRAMP
47592+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
47593+ pax_flags |= MF_PAX_EMUTRAMP;
47594+#endif
47595+
47596+#ifdef CONFIG_PAX_MPROTECT
47597+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
47598+ pax_flags |= MF_PAX_MPROTECT;
47599+#endif
47600+
47601+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
47602+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
47603+ pax_flags |= MF_PAX_RANDMMAP;
47604+#endif
47605+
47606+ return pax_flags;
47607+}
47608+#endif
47609+
47610+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
47611+#ifdef CONFIG_PAX_SOFTMODE
47612+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
47613+{
47614+ unsigned long pax_flags = 0UL;
47615+
47616+#ifdef CONFIG_PAX_PAGEEXEC
47617+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
47618+ pax_flags |= MF_PAX_PAGEEXEC;
47619+#endif
47620+
47621+#ifdef CONFIG_PAX_SEGMEXEC
47622+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
47623+ pax_flags |= MF_PAX_SEGMEXEC;
47624+#endif
47625+
47626+#ifdef CONFIG_PAX_EMUTRAMP
47627+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
47628+ pax_flags |= MF_PAX_EMUTRAMP;
47629+#endif
47630+
47631+#ifdef CONFIG_PAX_MPROTECT
47632+ if (pax_flags_softmode & MF_PAX_MPROTECT)
47633+ pax_flags |= MF_PAX_MPROTECT;
47634+#endif
47635+
47636+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
47637+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
47638+ pax_flags |= MF_PAX_RANDMMAP;
47639+#endif
47640+
47641+ return pax_flags;
47642+}
47643+#endif
47644+
47645+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
47646+{
47647+ unsigned long pax_flags = 0UL;
47648+
47649+#ifdef CONFIG_PAX_PAGEEXEC
47650+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
47651+ pax_flags |= MF_PAX_PAGEEXEC;
47652+#endif
47653+
47654+#ifdef CONFIG_PAX_SEGMEXEC
47655+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
47656+ pax_flags |= MF_PAX_SEGMEXEC;
47657+#endif
47658+
47659+#ifdef CONFIG_PAX_EMUTRAMP
47660+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
47661+ pax_flags |= MF_PAX_EMUTRAMP;
47662+#endif
47663+
47664+#ifdef CONFIG_PAX_MPROTECT
47665+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
47666+ pax_flags |= MF_PAX_MPROTECT;
47667+#endif
47668+
47669+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
47670+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
47671+ pax_flags |= MF_PAX_RANDMMAP;
47672+#endif
47673+
47674+ return pax_flags;
47675+}
47676+#endif
47677+
47678+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47679+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
47680+{
47681+ unsigned long pax_flags = 0UL;
47682+
47683+#ifdef CONFIG_PAX_EI_PAX
47684+
47685+#ifdef CONFIG_PAX_PAGEEXEC
47686+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
47687+ pax_flags |= MF_PAX_PAGEEXEC;
47688+#endif
47689+
47690+#ifdef CONFIG_PAX_SEGMEXEC
47691+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
47692+ pax_flags |= MF_PAX_SEGMEXEC;
47693+#endif
47694+
47695+#ifdef CONFIG_PAX_EMUTRAMP
47696+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
47697+ pax_flags |= MF_PAX_EMUTRAMP;
47698+#endif
47699+
47700+#ifdef CONFIG_PAX_MPROTECT
47701+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
47702+ pax_flags |= MF_PAX_MPROTECT;
47703+#endif
47704+
47705+#ifdef CONFIG_PAX_ASLR
47706+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
47707+ pax_flags |= MF_PAX_RANDMMAP;
47708+#endif
47709+
47710+#else
47711+
47712+#ifdef CONFIG_PAX_PAGEEXEC
47713+ pax_flags |= MF_PAX_PAGEEXEC;
47714+#endif
47715+
47716+#ifdef CONFIG_PAX_SEGMEXEC
47717+ pax_flags |= MF_PAX_SEGMEXEC;
47718+#endif
47719+
47720+#ifdef CONFIG_PAX_MPROTECT
47721+ pax_flags |= MF_PAX_MPROTECT;
47722+#endif
47723+
47724+#ifdef CONFIG_PAX_RANDMMAP
47725+ if (randomize_va_space)
47726+ pax_flags |= MF_PAX_RANDMMAP;
47727+#endif
47728+
47729+#endif
47730+
47731+ return pax_flags;
47732+}
47733+
47734+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
47735+{
47736+
47737+#ifdef CONFIG_PAX_PT_PAX_FLAGS
47738+ unsigned long i;
47739+
47740+ for (i = 0UL; i < elf_ex->e_phnum; i++)
47741+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
47742+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
47743+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
47744+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
47745+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
47746+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
47747+ return ~0UL;
47748+
47749+#ifdef CONFIG_PAX_SOFTMODE
47750+ if (pax_softmode)
47751+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
47752+ else
47753+#endif
47754+
47755+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
47756+ break;
47757+ }
47758+#endif
47759+
47760+ return ~0UL;
47761+}
47762+
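
pax_parse_pt_pax above honours a PT_PAX_FLAGS program header only when its markings are self-consistent: every feature has a paired enable/disable bit, and a header asserting both is rejected with the ~0UL "unmarked" sentinel. A compilable sketch of that consistency test, using a hypothetical bit pair in place of the PF_*/PF_NO* definitions:

/* Hypothetical pair standing in for PF_PAGEEXEC/PF_NOPAGEEXEC. */
#define PF_FEAT   (1U << 4)
#define PF_NOFEAT (1U << 5)

/* Nonzero when the marking is usable, zero on a contradiction. */
static int marking_consistent(unsigned int p_flags)
{
        return !((p_flags & PF_FEAT) && (p_flags & PF_NOFEAT));
}
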
47763+static unsigned long pax_parse_xattr_pax(struct file * const file)
47764+{
47765+
47766+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
47767+ ssize_t xattr_size, i;
47768+ unsigned char xattr_value[5];
47769+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
47770+
47771+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
47772+ if (xattr_size <= 0 || xattr_size > 5)
47773+ return ~0UL;
47774+
47775+ for (i = 0; i < xattr_size; i++)
47776+ switch (xattr_value[i]) {
47777+ default:
47778+ return ~0UL;
47779+
47780+#define parse_flag(option1, option2, flag) \
47781+ case option1: \
47782+ if (pax_flags_hardmode & MF_PAX_##flag) \
47783+ return ~0UL; \
47784+ pax_flags_hardmode |= MF_PAX_##flag; \
47785+ break; \
47786+ case option2: \
47787+ if (pax_flags_softmode & MF_PAX_##flag) \
47788+ return ~0UL; \
47789+ pax_flags_softmode |= MF_PAX_##flag; \
47790+ break;
47791+
47792+ parse_flag('p', 'P', PAGEEXEC);
47793+ parse_flag('e', 'E', EMUTRAMP);
47794+ parse_flag('m', 'M', MPROTECT);
47795+ parse_flag('r', 'R', RANDMMAP);
47796+ parse_flag('s', 'S', SEGMEXEC);
47797+
47798+#undef parse_flag
47799+ }
47800+
47801+ if (pax_flags_hardmode & pax_flags_softmode)
47802+ return ~0UL;
47803+
47804+#ifdef CONFIG_PAX_SOFTMODE
47805+ if (pax_softmode)
47806+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
47807+ else
47808+#endif
47809+
47810+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
47811+#else
47812+ return ~0UL;
47813+#endif
47814+
47815+}
47816+
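
pax_parse_xattr_pax reads at most five bytes from the PAX flags extended attribute; roughly, a lowercase letter records an "explicitly off" (hardmode) mark, its uppercase form an "explicitly on" (softmode) mark, and a repeated or contradictory byte invalidates the whole attribute. A hedged userspace sketch of tagging a binary this way — the attribute name "user.pax.flags" is an assumption about what XATTR_NAME_PAX_FLAGS expands to elsewhere in this patch:

#include <stdio.h>
#include <sys/xattr.h>

/* Usage: ./paxmark <binary>. "PM" asks for PAGEEXEC and MPROTECT to be
 * granted when the kernel runs in softmode. Sketch only; the attribute
 * name is assumed, not confirmed by this hunk. */
int main(int argc, char **argv)
{
        if (argc != 2) {
                fprintf(stderr, "usage: %s <binary>\n", argv[0]);
                return 1;
        }
        if (setxattr(argv[1], "user.pax.flags", "PM", 2, 0) != 0) {
                perror("setxattr");
                return 1;
        }
        return 0;
}
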
47817+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
47818+{
47819+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
47820+
47821+ pax_flags = pax_parse_ei_pax(elf_ex);
47822+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
47823+ xattr_pax_flags = pax_parse_xattr_pax(file);
47824+
47825+ if (pt_pax_flags == ~0UL)
47826+ pt_pax_flags = xattr_pax_flags;
47827+ else if (xattr_pax_flags == ~0UL)
47828+ xattr_pax_flags = pt_pax_flags;
47829+ if (pt_pax_flags != xattr_pax_flags)
47830+ return -EINVAL;
47831+ if (pt_pax_flags != ~0UL)
47832+ pax_flags = pt_pax_flags;
47833+
47834+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
47835+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47836+ if ((__supported_pte_mask & _PAGE_NX))
47837+ pax_flags &= ~MF_PAX_SEGMEXEC;
47838+ else
47839+ pax_flags &= ~MF_PAX_PAGEEXEC;
47840+ }
47841+#endif
47842+
47843+ if (0 > pax_check_flags(&pax_flags))
47844+ return -EINVAL;
47845+
47846+ current->mm->pax_flags = pax_flags;
47847+ return 0;
47848+}
47849+#endif
47850+
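
pax_parse_pax_flags reconciles the three sources: EI_PAX supplies the legacy default, and the two explicit markings (PT_PAX and the xattr) must agree whenever both exist, either one overriding the default; ~0UL is the "no marking" sentinel throughout. The same decision restated as a compact sketch, not kernel API:

/* Returns -1 on conflicting explicit markings, else writes the
 * effective flags to *out. */
static int merge_pax_marks(unsigned long ei, unsigned long pt,
                           unsigned long xattr, unsigned long *out)
{
        if (pt == ~0UL)
                pt = xattr;              /* PT_PAX absent: fall back to xattr */
        else if (xattr == ~0UL)
                xattr = pt;              /* xattr absent: fall back to PT_PAX */
        if (pt != xattr)
                return -1;               /* both present and disagreeing */
        *out = (pt != ~0UL) ? pt : ei;   /* explicit mark beats the default */
        return 0;
}
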
47851 /*
47852 * These are the functions used to load ELF style executables and shared
47853 * libraries. There is no binary dependent code anywhere else.
47854@@ -546,6 +876,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
47855 {
47856 unsigned int random_variable = 0;
47857
47858+#ifdef CONFIG_PAX_RANDUSTACK
47859+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
47860+ return stack_top - current->mm->delta_stack;
47861+#endif
47862+
47863 if ((current->flags & PF_RANDOMIZE) &&
47864 !(current->personality & ADDR_NO_RANDOMIZE)) {
47865 random_variable = get_random_int() & STACK_RND_MASK;
47866@@ -564,7 +899,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
47867 unsigned long load_addr = 0, load_bias = 0;
47868 int load_addr_set = 0;
47869 char * elf_interpreter = NULL;
47870- unsigned long error;
47871+ unsigned long error = 0;
47872 struct elf_phdr *elf_ppnt, *elf_phdata;
47873 unsigned long elf_bss, elf_brk;
47874 int retval, i;
47875@@ -574,12 +909,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
47876 unsigned long start_code, end_code, start_data, end_data;
47877 unsigned long reloc_func_desc __maybe_unused = 0;
47878 int executable_stack = EXSTACK_DEFAULT;
47879- unsigned long def_flags = 0;
47880 struct pt_regs *regs = current_pt_regs();
47881 struct {
47882 struct elfhdr elf_ex;
47883 struct elfhdr interp_elf_ex;
47884 } *loc;
47885+ unsigned long pax_task_size = TASK_SIZE;
47886
47887 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
47888 if (!loc) {
47889@@ -715,11 +1050,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
47890 goto out_free_dentry;
47891
47892 /* OK, This is the point of no return */
47893- current->mm->def_flags = def_flags;
47894+
47895+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47896+ current->mm->pax_flags = 0UL;
47897+#endif
47898+
47899+#ifdef CONFIG_PAX_DLRESOLVE
47900+ current->mm->call_dl_resolve = 0UL;
47901+#endif
47902+
47903+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
47904+ current->mm->call_syscall = 0UL;
47905+#endif
47906+
47907+#ifdef CONFIG_PAX_ASLR
47908+ current->mm->delta_mmap = 0UL;
47909+ current->mm->delta_stack = 0UL;
47910+#endif
47911+
47912+ current->mm->def_flags = 0;
47913+
47914+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
47915+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
47916+ send_sig(SIGKILL, current, 0);
47917+ goto out_free_dentry;
47918+ }
47919+#endif
47920+
47921+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47922+ pax_set_initial_flags(bprm);
47923+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
47924+ if (pax_set_initial_flags_func)
47925+ (pax_set_initial_flags_func)(bprm);
47926+#endif
47927+
47928+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
47929+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
47930+ current->mm->context.user_cs_limit = PAGE_SIZE;
47931+ current->mm->def_flags |= VM_PAGEEXEC;
47932+ }
47933+#endif
47934+
47935+#ifdef CONFIG_PAX_SEGMEXEC
47936+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
47937+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
47938+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
47939+ pax_task_size = SEGMEXEC_TASK_SIZE;
47940+ current->mm->def_flags |= VM_NOHUGEPAGE;
47941+ }
47942+#endif
47943+
47944+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
47945+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47946+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
47947+ put_cpu();
47948+ }
47949+#endif
47950
47951 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
47952 may depend on the personality. */
47953 SET_PERSONALITY(loc->elf_ex);
47954+
47955+#ifdef CONFIG_PAX_ASLR
47956+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
47957+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
47958+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
47959+ }
47960+#endif
47961+
47962+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
47963+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
47964+ executable_stack = EXSTACK_DISABLE_X;
47965+ current->personality &= ~READ_IMPLIES_EXEC;
47966+ } else
47967+#endif
47968+
47969 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
47970 current->personality |= READ_IMPLIES_EXEC;
47971
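
The delta_mmap/delta_stack assignments just above draw PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN bits of page-granular randomness per exec. A worked example of the resulting span — the 16-bit length and 4 KiB page size are illustrative, arch-specific assumptions:

#include <stdio.h>

int main(void)
{
        /* (rand & ((1UL << LEN) - 1)) << PAGE_SHIFT, with LEN=16, shift=12 */
        unsigned long max = ((1UL << 16) - 1) << 12;
        printf("delta range: 0 .. %#lx bytes (~%lu MiB)\n", max, max >> 20);
        return 0;
}

With those assumed constants, each delta displaces its region anywhere in a roughly 256 MiB window, in whole pages.
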
47972@@ -810,6 +1215,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
47973 #else
47974 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
47975 #endif
47976+
47977+#ifdef CONFIG_PAX_RANDMMAP
47978+ /* PaX: randomize base address at the default exe base if requested */
47979+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
47980+#ifdef CONFIG_SPARC64
47981+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
47982+#else
47983+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
47984+#endif
47985+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
47986+ elf_flags |= MAP_FIXED;
47987+ }
47988+#endif
47989+
47990 }
47991
47992 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
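
For ET_DYN binaries loaded with an interpreter, the RANDMMAP branch above recenters the load bias at PAX_ELF_ET_DYN_BASE plus a fresh random delta and pins the mapping with MAP_FIXED rather than leaving placement to the default mmap search (SPARC64 shifts the delta one extra bit). A sketch of that computation, with illustrative stand-ins for the per-architecture constants:

/* Example values only; the real PAX_ELF_ET_DYN_BASE and delta length
 * are arch-specific constants defined elsewhere in this patch. */
#define EX_ET_DYN_BASE 0x00400000UL
#define EX_DELTA_LEN   16
#define EX_PAGE_SHIFT  12

static unsigned long randomized_load_bias(unsigned long rnd, unsigned long vaddr)
{
        unsigned long bias = (rnd & ((1UL << EX_DELTA_LEN) - 1)) << EX_PAGE_SHIFT;
        /* ELF_PAGESTART equivalent: truncate to a page boundary */
        return (EX_ET_DYN_BASE - vaddr + bias) & ~((1UL << EX_PAGE_SHIFT) - 1);
}
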
47993@@ -842,9 +1261,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
47994 * allowed task size. Note that p_filesz must always be
47995 * <= p_memsz so it is only necessary to check p_memsz.
47996 */
47997- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
47998- elf_ppnt->p_memsz > TASK_SIZE ||
47999- TASK_SIZE - elf_ppnt->p_memsz < k) {
48000+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
48001+ elf_ppnt->p_memsz > pax_task_size ||
48002+ pax_task_size - elf_ppnt->p_memsz < k) {
48003 /* set_brk can never work. Avoid overflows. */
48004 send_sig(SIGKILL, current, 0);
48005 retval = -EINVAL;
48006@@ -883,17 +1302,44 @@ static int load_elf_binary(struct linux_binprm *bprm)
48007 goto out_free_dentry;
48008 }
48009 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
48010- send_sig(SIGSEGV, current, 0);
48011- retval = -EFAULT; /* Nobody gets to see this, but.. */
48012- goto out_free_dentry;
48013+ /*
48014+ * This bss-zeroing can fail if the ELF
48015+ * file specifies odd protections. So
48016+ * we don't check the return value
48017+ * we don't check the return value.
48018 }
48019
48020+#ifdef CONFIG_PAX_RANDMMAP
48021+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
48022+ unsigned long start, size;
48023+
48024+ start = ELF_PAGEALIGN(elf_brk);
48025+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
48026+ down_read(&current->mm->mmap_sem);
48027+ retval = -ENOMEM;
48028+ if (!find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
48029+ unsigned long prot = PROT_NONE;
48030+
48031+ up_read(&current->mm->mmap_sem);
48032+ current->mm->brk_gap = PAGE_ALIGN(size) >> PAGE_SHIFT;
48033+// if (current->personality & ADDR_NO_RANDOMIZE)
48034+// prot = PROT_READ;
48035+ start = vm_mmap(NULL, start, size, prot, MAP_ANONYMOUS | MAP_FIXED | MAP_PRIVATE, 0);
48036+ retval = IS_ERR_VALUE(start) ? start : 0;
48037+ } else
48038+ up_read(&current->mm->mmap_sem);
48039+ if (retval == 0)
48040+ retval = set_brk(start + size, start + size + PAGE_SIZE);
48041+ if (retval < 0) {
48042+ send_sig(SIGKILL, current, 0);
48043+ goto out_free_dentry;
48044+ }
48045+ }
48046+#endif
48047+
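
The RANDMMAP block above carves a randomized, inaccessible guard region just past the brk area: its size is PAGE_SIZE plus up to 22 random bits shifted left by 4, and the mapping is PROT_NONE so stray brk-relative accesses fault. The arithmetic, worked out under a 4 KiB page assumption:

#include <stdio.h>

int main(void)
{
        unsigned long page = 4096UL;                          /* 4 KiB pages assumed */
        unsigned long min  = page;                            /* random part == 0 */
        unsigned long max  = page + (((1UL << 22) - 1) << 4); /* random part maximal */
        printf("brk guard gap: %lu .. %lu bytes (~%lu MiB)\n", min, max, max >> 20);
        return 0;
}

So the gap ranges from one page to roughly 64 MiB, chosen fresh at every exec.
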
48048 if (elf_interpreter) {
48049- unsigned long interp_map_addr = 0;
48050-
48051 elf_entry = load_elf_interp(&loc->interp_elf_ex,
48052 interpreter,
48053- &interp_map_addr,
48054 load_bias);
48055 if (!IS_ERR((void *)elf_entry)) {
48056 /*
48057@@ -1115,7 +1561,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
48058 * Decide what to dump of a segment, part, all or none.
48059 */
48060 static unsigned long vma_dump_size(struct vm_area_struct *vma,
48061- unsigned long mm_flags)
48062+ unsigned long mm_flags, long signr)
48063 {
48064 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
48065
48066@@ -1152,7 +1598,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
48067 if (vma->vm_file == NULL)
48068 return 0;
48069
48070- if (FILTER(MAPPED_PRIVATE))
48071+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
48072 goto whole;
48073
48074 /*
48075@@ -1374,9 +1820,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
48076 {
48077 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
48078 int i = 0;
48079- do
48080+ do {
48081 i += 2;
48082- while (auxv[i - 2] != AT_NULL);
48083+ } while (auxv[i - 2] != AT_NULL);
48084 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
48085 }
48086
48087@@ -2006,14 +2452,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
48088 }
48089
48090 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
48091- unsigned long mm_flags)
48092+ struct coredump_params *cprm)
48093 {
48094 struct vm_area_struct *vma;
48095 size_t size = 0;
48096
48097 for (vma = first_vma(current, gate_vma); vma != NULL;
48098 vma = next_vma(vma, gate_vma))
48099- size += vma_dump_size(vma, mm_flags);
48100+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
48101 return size;
48102 }
48103
48104@@ -2107,7 +2553,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48105
48106 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
48107
48108- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
48109+ offset += elf_core_vma_data_size(gate_vma, cprm);
48110 offset += elf_core_extra_data_size();
48111 e_shoff = offset;
48112
48113@@ -2121,10 +2567,12 @@ static int elf_core_dump(struct coredump_params *cprm)
48114 offset = dataoff;
48115
48116 size += sizeof(*elf);
48117+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
48118 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
48119 goto end_coredump;
48120
48121 size += sizeof(*phdr4note);
48122+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
48123 if (size > cprm->limit
48124 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
48125 goto end_coredump;
48126@@ -2138,7 +2586,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48127 phdr.p_offset = offset;
48128 phdr.p_vaddr = vma->vm_start;
48129 phdr.p_paddr = 0;
48130- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
48131+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
48132 phdr.p_memsz = vma->vm_end - vma->vm_start;
48133 offset += phdr.p_filesz;
48134 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
48135@@ -2149,6 +2597,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48136 phdr.p_align = ELF_EXEC_PAGESIZE;
48137
48138 size += sizeof(phdr);
48139+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
48140 if (size > cprm->limit
48141 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
48142 goto end_coredump;
48143@@ -2173,7 +2622,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48144 unsigned long addr;
48145 unsigned long end;
48146
48147- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
48148+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
48149
48150 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
48151 struct page *page;
48152@@ -2182,6 +2631,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48153 page = get_dump_page(addr);
48154 if (page) {
48155 void *kaddr = kmap(page);
48156+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
48157 stop = ((size += PAGE_SIZE) > cprm->limit) ||
48158 !dump_write(cprm->file, kaddr,
48159 PAGE_SIZE);
48160@@ -2199,6 +2649,7 @@ static int elf_core_dump(struct coredump_params *cprm)
48161
48162 if (e_phnum == PN_XNUM) {
48163 size += sizeof(*shdr4extnum);
48164+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
48165 if (size > cprm->limit
48166 || !dump_write(cprm->file, shdr4extnum,
48167 sizeof(*shdr4extnum)))
48168@@ -2219,6 +2670,97 @@ out:
48169
48170 #endif /* CONFIG_ELF_CORE */
48171
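
The gr_learn_resource(current, RLIMIT_CORE, size, 1) calls threaded through elf_core_dump above record the running core size before each limit check, presumably so grsecurity's learning mode can observe how large a core the task actually needed even when the write is then refused. The account-then-check shape, abstracted:

/* Sketch of the pattern only; gr_learn_resource() itself is defined
 * elsewhere in this patch and is stubbed out here as a callback. */
static int emit_chunk(unsigned long *size, unsigned long limit,
                      unsigned long chunk, void (*learn)(unsigned long))
{
        *size += chunk;
        learn(*size);            /* record the demand first ... */
        return *size <= limit;   /* ... then enforce the limit */
}
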
48172+#ifdef CONFIG_PAX_MPROTECT
48173+/* PaX: non-PIC ELF libraries need relocations on their executable segments
48174+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
48175+ * we'll remove VM_MAYWRITE for good on RELRO segments.
48176+ *
48177+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
48178+ * basis because we want to allow the common case and not the special ones.
48179+ */
48180+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
48181+{
48182+ struct elfhdr elf_h;
48183+ struct elf_phdr elf_p;
48184+ unsigned long i;
48185+ unsigned long oldflags;
48186+ bool is_textrel_rw, is_textrel_rx, is_relro;
48187+
48188+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
48189+ return;
48190+
48191+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
48192+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
48193+
48194+#ifdef CONFIG_PAX_ELFRELOCS
48195+ /* possible TEXTREL */
48196+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
48197+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
48198+#else
48199+ is_textrel_rw = false;
48200+ is_textrel_rx = false;
48201+#endif
48202+
48203+ /* possible RELRO */
48204+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
48205+
48206+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
48207+ return;
48208+
48209+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
48210+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
48211+
48212+#ifdef CONFIG_PAX_ETEXECRELOCS
48213+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
48214+#else
48215+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
48216+#endif
48217+
48218+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
48219+ !elf_check_arch(&elf_h) ||
48220+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
48221+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
48222+ return;
48223+
48224+ for (i = 0UL; i < elf_h.e_phnum; i++) {
48225+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
48226+ return;
48227+ switch (elf_p.p_type) {
48228+ case PT_DYNAMIC:
48229+ if (!is_textrel_rw && !is_textrel_rx)
48230+ continue;
48231+ i = 0UL;
48232+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
48233+ elf_dyn dyn;
48234+
48235+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
48236+ return;
48237+ if (dyn.d_tag == DT_NULL)
48238+ return;
48239+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
48240+ gr_log_textrel(vma);
48241+ if (is_textrel_rw)
48242+ vma->vm_flags |= VM_MAYWRITE;
48243+ else
48244+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
48245+ vma->vm_flags &= ~VM_MAYWRITE;
48246+ return;
48247+ }
48248+ i++;
48249+ }
48250+ return;
48251+
48252+ case PT_GNU_RELRO:
48253+ if (!is_relro)
48254+ continue;
48255+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
48256+ vma->vm_flags &= ~VM_MAYWRITE;
48257+ return;
48258+ }
48259+ }
48260+}
48261+#endif
48262+
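
elf_handle_mprotect above detects text relocations by walking PT_DYNAMIC for a DT_TEXTREL entry or a DT_FLAGS entry carrying DF_TEXTREL. The same walk is easy to reproduce from userspace with the standard elf.h definitions; a minimal sketch, assuming a 64-bit ELF file and skipping most error handling:

#include <elf.h>
#include <stdio.h>

int main(int argc, char **argv)
{
        Elf64_Ehdr eh;
        Elf64_Phdr ph;
        Elf64_Dyn dyn;
        FILE *f;
        long i, j;

        if (argc != 2 || !(f = fopen(argv[1], "rb")) ||
            fread(&eh, sizeof(eh), 1, f) != 1)
                return 1;
        for (i = 0; i < eh.e_phnum; i++) {
                fseek(f, eh.e_phoff + i * sizeof(ph), SEEK_SET);
                if (fread(&ph, sizeof(ph), 1, f) != 1 || ph.p_type != PT_DYNAMIC)
                        continue;
                for (j = 0; (j + 1) * sizeof(dyn) <= ph.p_filesz; j++) {
                        fseek(f, ph.p_offset + j * sizeof(dyn), SEEK_SET);
                        if (fread(&dyn, sizeof(dyn), 1, f) != 1 || dyn.d_tag == DT_NULL)
                                break;
                        if (dyn.d_tag == DT_TEXTREL ||
                            (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
                                puts("TEXTREL present");
                                return 0;
                        }
                }
        }
        puts("no TEXTREL");
        return 0;
}
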
48263 static int __init init_elf_binfmt(void)
48264 {
48265 register_binfmt(&elf_format);
48266diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
48267index b563719..3868998 100644
48268--- a/fs/binfmt_flat.c
48269+++ b/fs/binfmt_flat.c
48270@@ -562,7 +562,9 @@ static int load_flat_file(struct linux_binprm * bprm,
48271 realdatastart = (unsigned long) -ENOMEM;
48272 printk("Unable to allocate RAM for process data, errno %d\n",
48273 (int)-realdatastart);
48274+ down_write(&current->mm->mmap_sem);
48275 vm_munmap(textpos, text_len);
48276+ up_write(&current->mm->mmap_sem);
48277 ret = realdatastart;
48278 goto err;
48279 }
48280@@ -586,8 +588,10 @@ static int load_flat_file(struct linux_binprm * bprm,
48281 }
48282 if (IS_ERR_VALUE(result)) {
48283 printk("Unable to read data+bss, errno %d\n", (int)-result);
48284+ down_write(&current->mm->mmap_sem);
48285 vm_munmap(textpos, text_len);
48286 vm_munmap(realdatastart, len);
48287+ up_write(&current->mm->mmap_sem);
48288 ret = result;
48289 goto err;
48290 }
48291@@ -654,8 +658,10 @@ static int load_flat_file(struct linux_binprm * bprm,
48292 }
48293 if (IS_ERR_VALUE(result)) {
48294 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
48295+ down_write(&current->mm->mmap_sem);
48296 vm_munmap(textpos, text_len + data_len + extra +
48297 MAX_SHARED_LIBS * sizeof(unsigned long));
48298+ up_write(&current->mm->mmap_sem);
48299 ret = result;
48300 goto err;
48301 }
48302diff --git a/fs/bio.c b/fs/bio.c
48303index b96fc6c..431d628 100644
48304--- a/fs/bio.c
48305+++ b/fs/bio.c
48306@@ -818,7 +818,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
48307 /*
48308 * Overflow, abort
48309 */
48310- if (end < start)
48311+ if (end < start || end - start > INT_MAX - nr_pages)
48312 return ERR_PTR(-EINVAL);
48313
48314 nr_pages += end - start;
48315@@ -952,7 +952,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
48316 /*
48317 * Overflow, abort
48318 */
48319- if (end < start)
48320+ if (end < start || end - start > INT_MAX - nr_pages)
48321 return ERR_PTR(-EINVAL);
48322
48323 nr_pages += end - start;
48324@@ -1214,7 +1214,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
48325 const int read = bio_data_dir(bio) == READ;
48326 struct bio_map_data *bmd = bio->bi_private;
48327 int i;
48328- char *p = bmd->sgvecs[0].iov_base;
48329+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
48330
48331 __bio_for_each_segment(bvec, bio, i, 0) {
48332 char *addr = page_address(bvec->bv_page);
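
Both fs/bio.c hunks strengthen the same test: "end < start" catches a wrapped page-range computation for one segment, but nr_pages accumulates ranges across every iovec and can itself overflow an int. A small demonstration of the combined guard:

#include <limits.h>
#include <stdbool.h>

/* Accumulate page counts across segments without overflowing the int
 * total. Mirrors the added check, not the kernel function itself. */
static bool add_segment(int *nr_pages, unsigned long start, unsigned long end)
{
        if (end < start ||
            end - start > (unsigned long)(INT_MAX - *nr_pages))
                return false;
        *nr_pages += (int)(end - start);
        return true;
}
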
48333diff --git a/fs/block_dev.c b/fs/block_dev.c
48334index 78333a3..23dcb4d 100644
48335--- a/fs/block_dev.c
48336+++ b/fs/block_dev.c
48337@@ -651,7 +651,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
48338 else if (bdev->bd_contains == bdev)
48339 return true; /* is a whole device which isn't held */
48340
48341- else if (whole->bd_holder == bd_may_claim)
48342+ else if (whole->bd_holder == (void *)bd_may_claim)
48343 return true; /* is a partition of a device that is being partitioned */
48344 else if (whole->bd_holder != NULL)
48345 return false; /* is a partition of a held device */
48346diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
48347index eea5da7..88fead70 100644
48348--- a/fs/btrfs/ctree.c
48349+++ b/fs/btrfs/ctree.c
48350@@ -1033,9 +1033,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
48351 free_extent_buffer(buf);
48352 add_root_to_dirty_list(root);
48353 } else {
48354- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
48355- parent_start = parent->start;
48356- else
48357+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
48358+ if (parent)
48359+ parent_start = parent->start;
48360+ else
48361+ parent_start = 0;
48362+ } else
48363 parent_start = 0;
48364
48365 WARN_ON(trans->transid != btrfs_header_generation(parent));
48366diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
48367index cc93b23..f3c42bf 100644
48368--- a/fs/btrfs/inode.c
48369+++ b/fs/btrfs/inode.c
48370@@ -7296,7 +7296,7 @@ fail:
48371 return -ENOMEM;
48372 }
48373
48374-static int btrfs_getattr(struct vfsmount *mnt,
48375+int btrfs_getattr(struct vfsmount *mnt,
48376 struct dentry *dentry, struct kstat *stat)
48377 {
48378 struct inode *inode = dentry->d_inode;
48379@@ -7310,6 +7310,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
48380 return 0;
48381 }
48382
48383+EXPORT_SYMBOL(btrfs_getattr);
48384+
48385+dev_t get_btrfs_dev_from_inode(struct inode *inode)
48386+{
48387+ return BTRFS_I(inode)->root->anon_dev;
48388+}
48389+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
48390+
48391 /*
48392 * If a file is moved, it will inherit the cow and compression flags of the new
48393 * directory.
48394diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
48395index 338f259..b657640 100644
48396--- a/fs/btrfs/ioctl.c
48397+++ b/fs/btrfs/ioctl.c
48398@@ -3033,9 +3033,12 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
48399 for (i = 0; i < num_types; i++) {
48400 struct btrfs_space_info *tmp;
48401
48402+ /* Don't copy in more than we allocated */
48403 if (!slot_count)
48404 break;
48405
48406+ slot_count--;
48407+
48408 info = NULL;
48409 rcu_read_lock();
48410 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
48411@@ -3057,10 +3060,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
48412 memcpy(dest, &space, sizeof(space));
48413 dest++;
48414 space_args.total_spaces++;
48415- slot_count--;
48416 }
48417- if (!slot_count)
48418- break;
48419 }
48420 up_read(&info->groups_sem);
48421 }
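
The btrfs_ioctl_space_info fix above reserves a slot before anything is copied: slot_count is decremented at the top of the per-type loop, so however the nested rcu walk behaves, the entries written can never exceed what the user buffer was sized for. The shape of the fix, abstracted away from btrfs:

/* "Reserve before emit": nslots bounds the writes up front, so a
 * surprising inner loop cannot overrun dest. Illustrative only. */
static int emit_bounded(int *dest, int nslots, const int *src, int nsrc)
{
        int copied = 0, i;

        for (i = 0; i < nsrc; i++) {
                if (!nslots)
                        break;
                nslots--;                /* slot consumed before the copy */
                dest[copied++] = src[i];
        }
        return copied;
}
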
48422diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
48423index 300e09a..9fe4539 100644
48424--- a/fs/btrfs/relocation.c
48425+++ b/fs/btrfs/relocation.c
48426@@ -1269,7 +1269,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
48427 }
48428 spin_unlock(&rc->reloc_root_tree.lock);
48429
48430- BUG_ON((struct btrfs_root *)node->data != root);
48431+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
48432
48433 if (!del) {
48434 spin_lock(&rc->reloc_root_tree.lock);
48435diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
48436index d8982e9..29a85fa 100644
48437--- a/fs/btrfs/super.c
48438+++ b/fs/btrfs/super.c
48439@@ -267,7 +267,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
48440 function, line, errstr);
48441 return;
48442 }
48443- ACCESS_ONCE(trans->transaction->aborted) = errno;
48444+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
48445 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
48446 }
48447 /*
48448diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
48449index 622f469..e8d2d55 100644
48450--- a/fs/cachefiles/bind.c
48451+++ b/fs/cachefiles/bind.c
48452@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
48453 args);
48454
48455 /* start by checking things over */
48456- ASSERT(cache->fstop_percent >= 0 &&
48457- cache->fstop_percent < cache->fcull_percent &&
48458+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
48459 cache->fcull_percent < cache->frun_percent &&
48460 cache->frun_percent < 100);
48461
48462- ASSERT(cache->bstop_percent >= 0 &&
48463- cache->bstop_percent < cache->bcull_percent &&
48464+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
48465 cache->bcull_percent < cache->brun_percent &&
48466 cache->brun_percent < 100);
48467
48468diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
48469index 0a1467b..6a53245 100644
48470--- a/fs/cachefiles/daemon.c
48471+++ b/fs/cachefiles/daemon.c
48472@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
48473 if (n > buflen)
48474 return -EMSGSIZE;
48475
48476- if (copy_to_user(_buffer, buffer, n) != 0)
48477+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
48478 return -EFAULT;
48479
48480 return n;
48481@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
48482 if (test_bit(CACHEFILES_DEAD, &cache->flags))
48483 return -EIO;
48484
48485- if (datalen < 0 || datalen > PAGE_SIZE - 1)
48486+ if (datalen > PAGE_SIZE - 1)
48487 return -EOPNOTSUPP;
48488
48489 /* drag the command string into the kernel so we can parse it */
48490@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
48491 if (args[0] != '%' || args[1] != '\0')
48492 return -EINVAL;
48493
48494- if (fstop < 0 || fstop >= cache->fcull_percent)
48495+ if (fstop >= cache->fcull_percent)
48496 return cachefiles_daemon_range_error(cache, args);
48497
48498 cache->fstop_percent = fstop;
48499@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
48500 if (args[0] != '%' || args[1] != '\0')
48501 return -EINVAL;
48502
48503- if (bstop < 0 || bstop >= cache->bcull_percent)
48504+ if (bstop >= cache->bcull_percent)
48505 return cachefiles_daemon_range_error(cache, args);
48506
48507 cache->bstop_percent = bstop;
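
The dropped "< 0" comparisons in the cachefiles hunks above were dead code: datalen is an unsigned size type and the percent fields are plain unsigned (visible in the internal.h context below), so a negative value cannot exist and each test always evaluated false. A two-line demonstration of the pitfall:

#include <stdio.h>

int main(void)
{
        size_t datalen = (size_t)-1;        /* enormous, not negative */
        printf("%d\n", datalen < 0);        /* prints 0: the check is dead */
        return 0;
}
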
48508diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
48509index 4938251..7e01445 100644
48510--- a/fs/cachefiles/internal.h
48511+++ b/fs/cachefiles/internal.h
48512@@ -59,7 +59,7 @@ struct cachefiles_cache {
48513 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
48514 struct rb_root active_nodes; /* active nodes (can't be culled) */
48515 rwlock_t active_lock; /* lock for active_nodes */
48516- atomic_t gravecounter; /* graveyard uniquifier */
48517+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
48518 unsigned frun_percent; /* when to stop culling (% files) */
48519 unsigned fcull_percent; /* when to start culling (% files) */
48520 unsigned fstop_percent; /* when to stop allocating (% files) */
48521@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
48522 * proc.c
48523 */
48524 #ifdef CONFIG_CACHEFILES_HISTOGRAM
48525-extern atomic_t cachefiles_lookup_histogram[HZ];
48526-extern atomic_t cachefiles_mkdir_histogram[HZ];
48527-extern atomic_t cachefiles_create_histogram[HZ];
48528+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
48529+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
48530+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
48531
48532 extern int __init cachefiles_proc_init(void);
48533 extern void cachefiles_proc_cleanup(void);
48534 static inline
48535-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
48536+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
48537 {
48538 unsigned long jif = jiffies - start_jif;
48539 if (jif >= HZ)
48540 jif = HZ - 1;
48541- atomic_inc(&histogram[jif]);
48542+ atomic_inc_unchecked(&histogram[jif]);
48543 }
48544
48545 #else
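
The atomic_t to atomic_unchecked_t conversions here (and in the cifs and coda hunks below) follow one rule: under PaX's REFCOUNT hardening, assumed to be defined elsewhere in this patch, ordinary atomic_t operations trap on overflow to defeat reference-count-overflow exploits, so counters whose wrap-around is harmless — histograms, uniquifiers, statistics — are moved to the parallel *_unchecked API to opt out. A userspace analogue of the split:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Analogue of atomic_inc() under REFCOUNT: overflow is a hard failure. */
static void inc_checked(int *v)
{
        if (*v == INT_MAX) {
                fprintf(stderr, "counter overflow trapped\n");
                abort();
        }
        (*v)++;
}

/* Analogue of atomic_inc_unchecked(): wrap-around is allowed and defined. */
static void inc_unchecked(unsigned int *v)
{
        (*v)++;
}

int main(void)
{
        int refs = 0;
        unsigned int stat = UINT_MAX;

        inc_checked(&refs);     /* lifetime-critical: keep the trap */
        inc_unchecked(&stat);   /* statistic: wraps silently to 0 */
        printf("refs=%d stat=%u\n", refs, stat);
        return 0;
}
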
48546diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
48547index 8c01c5fc..15f982e 100644
48548--- a/fs/cachefiles/namei.c
48549+++ b/fs/cachefiles/namei.c
48550@@ -317,7 +317,7 @@ try_again:
48551 /* first step is to make up a grave dentry in the graveyard */
48552 sprintf(nbuffer, "%08x%08x",
48553 (uint32_t) get_seconds(),
48554- (uint32_t) atomic_inc_return(&cache->gravecounter));
48555+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
48556
48557 /* do the multiway lock magic */
48558 trap = lock_rename(cache->graveyard, dir);
48559diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
48560index eccd339..4c1d995 100644
48561--- a/fs/cachefiles/proc.c
48562+++ b/fs/cachefiles/proc.c
48563@@ -14,9 +14,9 @@
48564 #include <linux/seq_file.h>
48565 #include "internal.h"
48566
48567-atomic_t cachefiles_lookup_histogram[HZ];
48568-atomic_t cachefiles_mkdir_histogram[HZ];
48569-atomic_t cachefiles_create_histogram[HZ];
48570+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
48571+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
48572+atomic_unchecked_t cachefiles_create_histogram[HZ];
48573
48574 /*
48575 * display the latency histogram
48576@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
48577 return 0;
48578 default:
48579 index = (unsigned long) v - 3;
48580- x = atomic_read(&cachefiles_lookup_histogram[index]);
48581- y = atomic_read(&cachefiles_mkdir_histogram[index]);
48582- z = atomic_read(&cachefiles_create_histogram[index]);
48583+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
48584+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
48585+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
48586 if (x == 0 && y == 0 && z == 0)
48587 return 0;
48588
48589diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
48590index 4809922..aab2c39 100644
48591--- a/fs/cachefiles/rdwr.c
48592+++ b/fs/cachefiles/rdwr.c
48593@@ -965,7 +965,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
48594 old_fs = get_fs();
48595 set_fs(KERNEL_DS);
48596 ret = file->f_op->write(
48597- file, (const void __user *) data, len, &pos);
48598+ file, (const void __force_user *) data, len, &pos);
48599 set_fs(old_fs);
48600 kunmap(page);
48601 if (ret != len)
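
The __user to __force_user rewrite above belongs with the set_fs(KERNEL_DS) dance around it: a kernel buffer is deliberately passed where a __user pointer is expected, and the cast documents that address-space crossing for sparse. __force_user is presumably this patch's shorthand for "__force __user"; the standard sparse machinery it builds on looks roughly like this:

/* Sketch of sparse address-space annotations (standard kernel idiom; the
 * __force_user macro itself is an assumption about this patch). */
#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

/* Legal only while the caller has widened the address limit via set_fs();
 * the cast tells sparse the crossing is intentional, not a bug. */
static long call_with_kernel_buf(long (*op)(const char __user *, unsigned long),
                                 const char *kbuf, unsigned long len)
{
        return op((const char __force __user *)kbuf, len);
}
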
48602diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
48603index 8c1aabe..bbf856a 100644
48604--- a/fs/ceph/dir.c
48605+++ b/fs/ceph/dir.c
48606@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
48607 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
48608 struct ceph_mds_client *mdsc = fsc->mdsc;
48609 unsigned frag = fpos_frag(filp->f_pos);
48610- int off = fpos_off(filp->f_pos);
48611+ unsigned int off = fpos_off(filp->f_pos);
48612 int err;
48613 u32 ftype;
48614 struct ceph_mds_reply_info_parsed *rinfo;
48615diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
48616index d9ea6ed..1e6c8ac 100644
48617--- a/fs/cifs/cifs_debug.c
48618+++ b/fs/cifs/cifs_debug.c
48619@@ -267,8 +267,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
48620
48621 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
48622 #ifdef CONFIG_CIFS_STATS2
48623- atomic_set(&totBufAllocCount, 0);
48624- atomic_set(&totSmBufAllocCount, 0);
48625+ atomic_set_unchecked(&totBufAllocCount, 0);
48626+ atomic_set_unchecked(&totSmBufAllocCount, 0);
48627 #endif /* CONFIG_CIFS_STATS2 */
48628 spin_lock(&cifs_tcp_ses_lock);
48629 list_for_each(tmp1, &cifs_tcp_ses_list) {
48630@@ -281,7 +281,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
48631 tcon = list_entry(tmp3,
48632 struct cifs_tcon,
48633 tcon_list);
48634- atomic_set(&tcon->num_smbs_sent, 0);
48635+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
48636 if (server->ops->clear_stats)
48637 server->ops->clear_stats(tcon);
48638 }
48639@@ -313,8 +313,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
48640 smBufAllocCount.counter, cifs_min_small);
48641 #ifdef CONFIG_CIFS_STATS2
48642 seq_printf(m, "Total Large %d Small %d Allocations\n",
48643- atomic_read(&totBufAllocCount),
48644- atomic_read(&totSmBufAllocCount));
48645+ atomic_read_unchecked(&totBufAllocCount),
48646+ atomic_read_unchecked(&totSmBufAllocCount));
48647 #endif /* CONFIG_CIFS_STATS2 */
48648
48649 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
48650@@ -343,7 +343,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
48651 if (tcon->need_reconnect)
48652 seq_puts(m, "\tDISCONNECTED ");
48653 seq_printf(m, "\nSMBs: %d",
48654- atomic_read(&tcon->num_smbs_sent));
48655+ atomic_read_unchecked(&tcon->num_smbs_sent));
48656 if (server->ops->print_stats)
48657 server->ops->print_stats(m, tcon);
48658 }
48659diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
48660index de7f916..6cb22a9 100644
48661--- a/fs/cifs/cifsfs.c
48662+++ b/fs/cifs/cifsfs.c
48663@@ -997,7 +997,7 @@ cifs_init_request_bufs(void)
48664 /* cERROR(1, "CIFSMaxBufSize %d 0x%x",CIFSMaxBufSize,CIFSMaxBufSize); */
48665 cifs_req_cachep = kmem_cache_create("cifs_request",
48666 CIFSMaxBufSize + max_hdr_size, 0,
48667- SLAB_HWCACHE_ALIGN, NULL);
48668+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
48669 if (cifs_req_cachep == NULL)
48670 return -ENOMEM;
48671
48672@@ -1024,7 +1024,7 @@ cifs_init_request_bufs(void)
48673 efficient to alloc 1 per page off the slab compared to 17K (5 page)
48674 alloc of large cifs buffers even when page debugging is on */
48675 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
48676- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
48677+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
48678 NULL);
48679 if (cifs_sm_req_cachep == NULL) {
48680 mempool_destroy(cifs_req_poolp);
48681@@ -1109,8 +1109,8 @@ init_cifs(void)
48682 atomic_set(&bufAllocCount, 0);
48683 atomic_set(&smBufAllocCount, 0);
48684 #ifdef CONFIG_CIFS_STATS2
48685- atomic_set(&totBufAllocCount, 0);
48686- atomic_set(&totSmBufAllocCount, 0);
48687+ atomic_set_unchecked(&totBufAllocCount, 0);
48688+ atomic_set_unchecked(&totSmBufAllocCount, 0);
48689 #endif /* CONFIG_CIFS_STATS2 */
48690
48691 atomic_set(&midCount, 0);
48692diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
48693index e6899ce..d6b2920 100644
48694--- a/fs/cifs/cifsglob.h
48695+++ b/fs/cifs/cifsglob.h
48696@@ -751,35 +751,35 @@ struct cifs_tcon {
48697 __u16 Flags; /* optional support bits */
48698 enum statusEnum tidStatus;
48699 #ifdef CONFIG_CIFS_STATS
48700- atomic_t num_smbs_sent;
48701+ atomic_unchecked_t num_smbs_sent;
48702 union {
48703 struct {
48704- atomic_t num_writes;
48705- atomic_t num_reads;
48706- atomic_t num_flushes;
48707- atomic_t num_oplock_brks;
48708- atomic_t num_opens;
48709- atomic_t num_closes;
48710- atomic_t num_deletes;
48711- atomic_t num_mkdirs;
48712- atomic_t num_posixopens;
48713- atomic_t num_posixmkdirs;
48714- atomic_t num_rmdirs;
48715- atomic_t num_renames;
48716- atomic_t num_t2renames;
48717- atomic_t num_ffirst;
48718- atomic_t num_fnext;
48719- atomic_t num_fclose;
48720- atomic_t num_hardlinks;
48721- atomic_t num_symlinks;
48722- atomic_t num_locks;
48723- atomic_t num_acl_get;
48724- atomic_t num_acl_set;
48725+ atomic_unchecked_t num_writes;
48726+ atomic_unchecked_t num_reads;
48727+ atomic_unchecked_t num_flushes;
48728+ atomic_unchecked_t num_oplock_brks;
48729+ atomic_unchecked_t num_opens;
48730+ atomic_unchecked_t num_closes;
48731+ atomic_unchecked_t num_deletes;
48732+ atomic_unchecked_t num_mkdirs;
48733+ atomic_unchecked_t num_posixopens;
48734+ atomic_unchecked_t num_posixmkdirs;
48735+ atomic_unchecked_t num_rmdirs;
48736+ atomic_unchecked_t num_renames;
48737+ atomic_unchecked_t num_t2renames;
48738+ atomic_unchecked_t num_ffirst;
48739+ atomic_unchecked_t num_fnext;
48740+ atomic_unchecked_t num_fclose;
48741+ atomic_unchecked_t num_hardlinks;
48742+ atomic_unchecked_t num_symlinks;
48743+ atomic_unchecked_t num_locks;
48744+ atomic_unchecked_t num_acl_get;
48745+ atomic_unchecked_t num_acl_set;
48746 } cifs_stats;
48747 #ifdef CONFIG_CIFS_SMB2
48748 struct {
48749- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
48750- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
48751+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
48752+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
48753 } smb2_stats;
48754 #endif /* CONFIG_CIFS_SMB2 */
48755 } stats;
48756@@ -1080,7 +1080,7 @@ convert_delimiter(char *path, char delim)
48757 }
48758
48759 #ifdef CONFIG_CIFS_STATS
48760-#define cifs_stats_inc atomic_inc
48761+#define cifs_stats_inc atomic_inc_unchecked
48762
48763 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
48764 unsigned int bytes)
48765@@ -1445,8 +1445,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
48766 /* Various Debug counters */
48767 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
48768 #ifdef CONFIG_CIFS_STATS2
48769-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
48770-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
48771+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
48772+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
48773 #endif
48774 GLOBAL_EXTERN atomic_t smBufAllocCount;
48775 GLOBAL_EXTERN atomic_t midCount;
48776diff --git a/fs/cifs/link.c b/fs/cifs/link.c
48777index 51dc2fb..1e12a33 100644
48778--- a/fs/cifs/link.c
48779+++ b/fs/cifs/link.c
48780@@ -616,7 +616,7 @@ symlink_exit:
48781
48782 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
48783 {
48784- char *p = nd_get_link(nd);
48785+ const char *p = nd_get_link(nd);
48786 if (!IS_ERR(p))
48787 kfree(p);
48788 }
48789diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
48790index 3a00c0d..42d901c 100644
48791--- a/fs/cifs/misc.c
48792+++ b/fs/cifs/misc.c
48793@@ -169,7 +169,7 @@ cifs_buf_get(void)
48794 memset(ret_buf, 0, buf_size + 3);
48795 atomic_inc(&bufAllocCount);
48796 #ifdef CONFIG_CIFS_STATS2
48797- atomic_inc(&totBufAllocCount);
48798+ atomic_inc_unchecked(&totBufAllocCount);
48799 #endif /* CONFIG_CIFS_STATS2 */
48800 }
48801
48802@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
48803 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
48804 atomic_inc(&smBufAllocCount);
48805 #ifdef CONFIG_CIFS_STATS2
48806- atomic_inc(&totSmBufAllocCount);
48807+ atomic_inc_unchecked(&totSmBufAllocCount);
48808 #endif /* CONFIG_CIFS_STATS2 */
48809
48810 }
48811diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
48812index 47bc5a8..10decbe 100644
48813--- a/fs/cifs/smb1ops.c
48814+++ b/fs/cifs/smb1ops.c
48815@@ -586,27 +586,27 @@ static void
48816 cifs_clear_stats(struct cifs_tcon *tcon)
48817 {
48818 #ifdef CONFIG_CIFS_STATS
48819- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
48820- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
48821- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
48822- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
48823- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
48824- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
48825- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
48826- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
48827- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
48828- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
48829- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
48830- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
48831- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
48832- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
48833- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
48834- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
48835- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
48836- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
48837- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
48838- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
48839- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
48840+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
48841+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
48842+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
48843+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
48844+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
48845+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
48846+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
48847+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
48848+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
48849+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
48850+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
48851+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
48852+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
48853+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
48854+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
48855+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
48856+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
48857+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
48858+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
48859+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
48860+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
48861 #endif
48862 }
48863
48864@@ -615,36 +615,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
48865 {
48866 #ifdef CONFIG_CIFS_STATS
48867 seq_printf(m, " Oplocks breaks: %d",
48868- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
48869+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
48870 seq_printf(m, "\nReads: %d Bytes: %llu",
48871- atomic_read(&tcon->stats.cifs_stats.num_reads),
48872+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
48873 (long long)(tcon->bytes_read));
48874 seq_printf(m, "\nWrites: %d Bytes: %llu",
48875- atomic_read(&tcon->stats.cifs_stats.num_writes),
48876+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
48877 (long long)(tcon->bytes_written));
48878 seq_printf(m, "\nFlushes: %d",
48879- atomic_read(&tcon->stats.cifs_stats.num_flushes));
48880+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
48881 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
48882- atomic_read(&tcon->stats.cifs_stats.num_locks),
48883- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
48884- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
48885+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
48886+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
48887+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
48888 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
48889- atomic_read(&tcon->stats.cifs_stats.num_opens),
48890- atomic_read(&tcon->stats.cifs_stats.num_closes),
48891- atomic_read(&tcon->stats.cifs_stats.num_deletes));
48892+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
48893+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
48894+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
48895 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
48896- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
48897- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
48898+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
48899+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
48900 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
48901- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
48902- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
48903+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
48904+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
48905 seq_printf(m, "\nRenames: %d T2 Renames %d",
48906- atomic_read(&tcon->stats.cifs_stats.num_renames),
48907- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
48908+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
48909+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
48910 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
48911- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
48912- atomic_read(&tcon->stats.cifs_stats.num_fnext),
48913- atomic_read(&tcon->stats.cifs_stats.num_fclose));
48914+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
48915+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
48916+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
48917 #endif
48918 }
48919
48920diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
48921index c9c7aa7..065056a 100644
48922--- a/fs/cifs/smb2ops.c
48923+++ b/fs/cifs/smb2ops.c
48924@@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
48925 #ifdef CONFIG_CIFS_STATS
48926 int i;
48927 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
48928- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
48929- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
48930+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
48931+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
48932 }
48933 #endif
48934 }
48935@@ -284,66 +284,66 @@ static void
48936 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
48937 {
48938 #ifdef CONFIG_CIFS_STATS
48939- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
48940- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
48941+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
48942+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
48943 seq_printf(m, "\nNegotiates: %d sent %d failed",
48944- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
48945- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
48946+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
48947+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
48948 seq_printf(m, "\nSessionSetups: %d sent %d failed",
48949- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
48950- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
48951+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
48952+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
48953 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
48954 seq_printf(m, "\nLogoffs: %d sent %d failed",
48955- atomic_read(&sent[SMB2_LOGOFF_HE]),
48956- atomic_read(&failed[SMB2_LOGOFF_HE]));
48957+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
48958+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
48959 seq_printf(m, "\nTreeConnects: %d sent %d failed",
48960- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
48961- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
48962+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
48963+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
48964 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
48965- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
48966- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
48967+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
48968+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
48969 seq_printf(m, "\nCreates: %d sent %d failed",
48970- atomic_read(&sent[SMB2_CREATE_HE]),
48971- atomic_read(&failed[SMB2_CREATE_HE]));
48972+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
48973+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
48974 seq_printf(m, "\nCloses: %d sent %d failed",
48975- atomic_read(&sent[SMB2_CLOSE_HE]),
48976- atomic_read(&failed[SMB2_CLOSE_HE]));
48977+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
48978+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
48979 seq_printf(m, "\nFlushes: %d sent %d failed",
48980- atomic_read(&sent[SMB2_FLUSH_HE]),
48981- atomic_read(&failed[SMB2_FLUSH_HE]));
48982+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
48983+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
48984 seq_printf(m, "\nReads: %d sent %d failed",
48985- atomic_read(&sent[SMB2_READ_HE]),
48986- atomic_read(&failed[SMB2_READ_HE]));
48987+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
48988+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
48989 seq_printf(m, "\nWrites: %d sent %d failed",
48990- atomic_read(&sent[SMB2_WRITE_HE]),
48991- atomic_read(&failed[SMB2_WRITE_HE]));
48992+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
48993+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
48994 seq_printf(m, "\nLocks: %d sent %d failed",
48995- atomic_read(&sent[SMB2_LOCK_HE]),
48996- atomic_read(&failed[SMB2_LOCK_HE]));
48997+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
48998+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
48999 seq_printf(m, "\nIOCTLs: %d sent %d failed",
49000- atomic_read(&sent[SMB2_IOCTL_HE]),
49001- atomic_read(&failed[SMB2_IOCTL_HE]));
49002+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
49003+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
49004 seq_printf(m, "\nCancels: %d sent %d failed",
49005- atomic_read(&sent[SMB2_CANCEL_HE]),
49006- atomic_read(&failed[SMB2_CANCEL_HE]));
49007+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
49008+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
49009 seq_printf(m, "\nEchos: %d sent %d failed",
49010- atomic_read(&sent[SMB2_ECHO_HE]),
49011- atomic_read(&failed[SMB2_ECHO_HE]));
49012+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
49013+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
49014 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
49015- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
49016- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
49017+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
49018+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
49019 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
49020- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
49021- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
49022+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
49023+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
49024 seq_printf(m, "\nQueryInfos: %d sent %d failed",
49025- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
49026- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
49027+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
49028+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
49029 seq_printf(m, "\nSetInfos: %d sent %d failed",
49030- atomic_read(&sent[SMB2_SET_INFO_HE]),
49031- atomic_read(&failed[SMB2_SET_INFO_HE]));
49032+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
49033+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
49034 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
49035- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
49036- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
49037+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
49038+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
49039 #endif
49040 }
49041
49042diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
49043index 41d9d07..dbb4772 100644
49044--- a/fs/cifs/smb2pdu.c
49045+++ b/fs/cifs/smb2pdu.c
49046@@ -1761,8 +1761,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
49047 default:
49048 cERROR(1, "info level %u isn't supported",
49049 srch_inf->info_level);
49050- rc = -EINVAL;
49051- goto qdir_exit;
49052+ return -EINVAL;
49053 }
49054
49055 req->FileIndex = cpu_to_le32(index);
49056diff --git a/fs/coda/cache.c b/fs/coda/cache.c
49057index 958ae0e..505c9d0 100644
49058--- a/fs/coda/cache.c
49059+++ b/fs/coda/cache.c
49060@@ -24,7 +24,7 @@
49061 #include "coda_linux.h"
49062 #include "coda_cache.h"
49063
49064-static atomic_t permission_epoch = ATOMIC_INIT(0);
49065+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
49066
49067 /* replace or extend an acl cache hit */
49068 void coda_cache_enter(struct inode *inode, int mask)
49069@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
49070 struct coda_inode_info *cii = ITOC(inode);
49071
49072 spin_lock(&cii->c_lock);
49073- cii->c_cached_epoch = atomic_read(&permission_epoch);
49074+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
49075 if (cii->c_uid != current_fsuid()) {
49076 cii->c_uid = current_fsuid();
49077 cii->c_cached_perm = mask;
49078@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
49079 {
49080 struct coda_inode_info *cii = ITOC(inode);
49081 spin_lock(&cii->c_lock);
49082- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
49083+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
49084 spin_unlock(&cii->c_lock);
49085 }
49086
49087 /* remove all acl caches */
49088 void coda_cache_clear_all(struct super_block *sb)
49089 {
49090- atomic_inc(&permission_epoch);
49091+ atomic_inc_unchecked(&permission_epoch);
49092 }
49093
49094
49095@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
49096 spin_lock(&cii->c_lock);
49097 hit = (mask & cii->c_cached_perm) == mask &&
49098 cii->c_uid == current_fsuid() &&
49099- cii->c_cached_epoch == atomic_read(&permission_epoch);
49100+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
49101 spin_unlock(&cii->c_lock);
49102
49103 return hit;
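
The cifs and coda hunks above show a conversion pattern used throughout this patch: counters whose wraparound is harmless (protocol statistics, cache epochs) move from atomic_t to atomic_unchecked_t, so that PAX_REFCOUNT's overflow detection does not trip on them while real reference counts stay checked. A minimal userspace sketch of the idea, with illustrative names and a simplified check standing in for the kernel's assembly-level one:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

/* Checked flavor: refuse to wrap past INT_MAX, as PAX_REFCOUNT would. */
static void atomic_inc(atomic_t *v)
{
	if (__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED) == INT_MAX) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();
	}
}

/* Unchecked flavor: wraparound is tolerated; meant for pure statistics. */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}

Keeping the two as distinct types means the compiler, not convention, enforces which counters have opted out of checking.
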
49104diff --git a/fs/compat.c b/fs/compat.c
49105index 015e1e1..b8966ac 100644
49106--- a/fs/compat.c
49107+++ b/fs/compat.c
49108@@ -54,7 +54,7 @@
49109 #include <asm/ioctls.h>
49110 #include "internal.h"
49111
49112-int compat_log = 1;
49113+int compat_log = 0;
49114
49115 int compat_printk(const char *fmt, ...)
49116 {
49117@@ -490,7 +490,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
49118
49119 set_fs(KERNEL_DS);
49120 /* The __user pointer cast is valid because of the set_fs() */
49121- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
49122+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
49123 set_fs(oldfs);
49124 /* truncating is ok because it's a user address */
49125 if (!ret)
49126@@ -548,7 +548,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
49127 goto out;
49128
49129 ret = -EINVAL;
49130- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
49131+ if (nr_segs > UIO_MAXIOV)
49132 goto out;
49133 if (nr_segs > fast_segs) {
49134 ret = -ENOMEM;
49135@@ -831,6 +831,7 @@ struct compat_old_linux_dirent {
49136
49137 struct compat_readdir_callback {
49138 struct compat_old_linux_dirent __user *dirent;
49139+ struct file * file;
49140 int result;
49141 };
49142
49143@@ -848,6 +849,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
49144 buf->result = -EOVERFLOW;
49145 return -EOVERFLOW;
49146 }
49147+
49148+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49149+ return 0;
49150+
49151 buf->result++;
49152 dirent = buf->dirent;
49153 if (!access_ok(VERIFY_WRITE, dirent,
49154@@ -878,6 +883,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
49155
49156 buf.result = 0;
49157 buf.dirent = dirent;
49158+ buf.file = f.file;
49159
49160 error = vfs_readdir(f.file, compat_fillonedir, &buf);
49161 if (buf.result)
49162@@ -897,6 +903,7 @@ struct compat_linux_dirent {
49163 struct compat_getdents_callback {
49164 struct compat_linux_dirent __user *current_dir;
49165 struct compat_linux_dirent __user *previous;
49166+ struct file * file;
49167 int count;
49168 int error;
49169 };
49170@@ -918,6 +925,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
49171 buf->error = -EOVERFLOW;
49172 return -EOVERFLOW;
49173 }
49174+
49175+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49176+ return 0;
49177+
49178 dirent = buf->previous;
49179 if (dirent) {
49180 if (__put_user(offset, &dirent->d_off))
49181@@ -963,6 +974,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
49182 buf.previous = NULL;
49183 buf.count = count;
49184 buf.error = 0;
49185+ buf.file = f.file;
49186
49187 error = vfs_readdir(f.file, compat_filldir, &buf);
49188 if (error >= 0)
49189@@ -983,6 +995,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
49190 struct compat_getdents_callback64 {
49191 struct linux_dirent64 __user *current_dir;
49192 struct linux_dirent64 __user *previous;
49193+ struct file * file;
49194 int count;
49195 int error;
49196 };
49197@@ -999,6 +1012,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
49198 buf->error = -EINVAL; /* only used if we fail.. */
49199 if (reclen > buf->count)
49200 return -EINVAL;
49201+
49202+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
49203+ return 0;
49204+
49205 dirent = buf->previous;
49206
49207 if (dirent) {
49208@@ -1048,13 +1065,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
49209 buf.previous = NULL;
49210 buf.count = count;
49211 buf.error = 0;
49212+ buf.file = f.file;
49213
49214 error = vfs_readdir(f.file, compat_filldir64, &buf);
49215 if (error >= 0)
49216 error = buf.error;
49217 lastdirent = buf.previous;
49218 if (lastdirent) {
49219- typeof(lastdirent->d_off) d_off = f.file->f_pos;
49220+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
49221 if (__put_user_unaligned(d_off, &lastdirent->d_off))
49222 error = -EFAULT;
49223 else
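
Each of the three compat readdir paths above receives the same two-part change: the struct file is stashed in the callback buffer, and the per-entry callback asks gr_acl_handle_filldir() whether the entry may be shown, returning 0 (skip this entry, keep iterating) when it may not. A minimal sketch of the pattern, with a hypothetical predicate in place of the grsecurity hook:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

struct getdents_callback {
	const void *file;	/* stashed like buf->file in the hunks above */
	int count;
};

/* Stand-in for gr_acl_handle_filldir(): nonzero means "may be listed". */
static bool acl_allows(const void *file, const char *name, size_t namlen)
{
	(void)file;
	return !(namlen == 6 && memcmp(name, "secret", 6) == 0);
}

static int filldir(void *__buf, const char *name, size_t namlen)
{
	struct getdents_callback *buf = __buf;

	if (!acl_allows(buf->file, name, namlen))
		return 0;	/* hide the entry but continue the walk */
	buf->count++;		/* the real callback copies a dirent out */
	return 0;
}

Returning 0 rather than an error keeps the denial invisible: the listing simply omits the entry instead of failing the whole getdents call.
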
49224diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
49225index a81147e..20bf2b5 100644
49226--- a/fs/compat_binfmt_elf.c
49227+++ b/fs/compat_binfmt_elf.c
49228@@ -30,11 +30,13 @@
49229 #undef elf_phdr
49230 #undef elf_shdr
49231 #undef elf_note
49232+#undef elf_dyn
49233 #undef elf_addr_t
49234 #define elfhdr elf32_hdr
49235 #define elf_phdr elf32_phdr
49236 #define elf_shdr elf32_shdr
49237 #define elf_note elf32_note
49238+#define elf_dyn Elf32_Dyn
49239 #define elf_addr_t Elf32_Addr
49240
49241 /*
49242diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
49243index e2f57a0..3c78771 100644
49244--- a/fs/compat_ioctl.c
49245+++ b/fs/compat_ioctl.c
49246@@ -623,7 +623,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
49247 return -EFAULT;
49248 if (__get_user(udata, &ss32->iomem_base))
49249 return -EFAULT;
49250- ss.iomem_base = compat_ptr(udata);
49251+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
49252 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
49253 __get_user(ss.port_high, &ss32->port_high))
49254 return -EFAULT;
49255@@ -798,7 +798,7 @@ static int compat_ioctl_preallocate(struct file *file,
49256 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
49257 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
49258 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
49259- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
49260+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
49261 return -EFAULT;
49262
49263 return ioctl_preallocate(file, p);
49264@@ -1620,8 +1620,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
49265 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
49266 {
49267 unsigned int a, b;
49268- a = *(unsigned int *)p;
49269- b = *(unsigned int *)q;
49270+ a = *(const unsigned int *)p;
49271+ b = *(const unsigned int *)q;
49272 if (a > b)
49273 return 1;
49274 if (a < b)
49275diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
49276index 712b10f..c33c4ca 100644
49277--- a/fs/configfs/dir.c
49278+++ b/fs/configfs/dir.c
49279@@ -1037,10 +1037,11 @@ static int configfs_dump(struct configfs_dirent *sd, int level)
49280 static int configfs_depend_prep(struct dentry *origin,
49281 struct config_item *target)
49282 {
49283- struct configfs_dirent *child_sd, *sd = origin->d_fsdata;
49284+ struct configfs_dirent *child_sd, *sd;
49285 int ret = 0;
49286
49287- BUG_ON(!origin || !sd);
49288+ BUG_ON(!origin || !origin->d_fsdata);
49289+ sd = origin->d_fsdata;
49290
49291 if (sd->s_element == target) /* Boo-yah */
49292 goto out;
49293@@ -1564,7 +1565,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
49294 }
49295 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
49296 struct configfs_dirent *next;
49297- const char * name;
49298+ const unsigned char * name;
49299+ char d_name[sizeof(next->s_dentry->d_iname)];
49300 int len;
49301 struct inode *inode = NULL;
49302
49303@@ -1574,7 +1576,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
49304 continue;
49305
49306 name = configfs_get_name(next);
49307- len = strlen(name);
49308+ if (next->s_dentry && name == next->s_dentry->d_iname) {
49309+ len = next->s_dentry->d_name.len;
49310+ memcpy(d_name, name, len);
49311+ name = d_name;
49312+ } else
49313+ len = strlen(name);
49314
49315 /*
49316 * We'll have a dentry and an inode for
49317diff --git a/fs/coredump.c b/fs/coredump.c
49318index 1774932..5812106 100644
49319--- a/fs/coredump.c
49320+++ b/fs/coredump.c
49321@@ -52,7 +52,7 @@ struct core_name {
49322 char *corename;
49323 int used, size;
49324 };
49325-static atomic_t call_count = ATOMIC_INIT(1);
49326+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
49327
49328 /* The maximal length of core_pattern is also specified in sysctl.c */
49329
49330@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
49331 {
49332 char *old_corename = cn->corename;
49333
49334- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
49335+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
49336 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
49337
49338 if (!cn->corename) {
49339@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
49340 int pid_in_pattern = 0;
49341 int err = 0;
49342
49343- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
49344+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
49345 cn->corename = kmalloc(cn->size, GFP_KERNEL);
49346 cn->used = 0;
49347
49348@@ -414,17 +414,17 @@ static void wait_for_dump_helpers(struct file *file)
49349 pipe = file->f_path.dentry->d_inode->i_pipe;
49350
49351 pipe_lock(pipe);
49352- pipe->readers++;
49353- pipe->writers--;
49354+ atomic_inc(&pipe->readers);
49355+ atomic_dec(&pipe->writers);
49356
49357- while ((pipe->readers > 1) && (!signal_pending(current))) {
49358+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
49359 wake_up_interruptible_sync(&pipe->wait);
49360 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
49361 pipe_wait(pipe);
49362 }
49363
49364- pipe->readers--;
49365- pipe->writers++;
49366+ atomic_dec(&pipe->readers);
49367+ atomic_inc(&pipe->writers);
49368 pipe_unlock(pipe);
49369
49370 }
49371@@ -471,7 +471,8 @@ void do_coredump(siginfo_t *siginfo)
49372 int ispipe;
49373 struct files_struct *displaced;
49374 bool need_nonrelative = false;
49375- static atomic_t core_dump_count = ATOMIC_INIT(0);
49376+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
49377+ long signr = siginfo->si_signo;
49378 struct coredump_params cprm = {
49379 .siginfo = siginfo,
49380 .regs = signal_pt_regs(),
49381@@ -484,7 +485,10 @@ void do_coredump(siginfo_t *siginfo)
49382 .mm_flags = mm->flags,
49383 };
49384
49385- audit_core_dumps(siginfo->si_signo);
49386+ audit_core_dumps(signr);
49387+
49388+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
49389+ gr_handle_brute_attach(cprm.mm_flags);
49390
49391 binfmt = mm->binfmt;
49392 if (!binfmt || !binfmt->core_dump)
49393@@ -508,7 +512,7 @@ void do_coredump(siginfo_t *siginfo)
49394 need_nonrelative = true;
49395 }
49396
49397- retval = coredump_wait(siginfo->si_signo, &core_state);
49398+ retval = coredump_wait(signr, &core_state);
49399 if (retval < 0)
49400 goto fail_creds;
49401
49402@@ -556,7 +560,7 @@ void do_coredump(siginfo_t *siginfo)
49403 }
49404 cprm.limit = RLIM_INFINITY;
49405
49406- dump_count = atomic_inc_return(&core_dump_count);
49407+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
49408 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
49409 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
49410 task_tgid_vnr(current), current->comm);
49411@@ -583,6 +587,8 @@ void do_coredump(siginfo_t *siginfo)
49412 } else {
49413 struct inode *inode;
49414
49415+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
49416+
49417 if (cprm.limit < binfmt->min_coredump)
49418 goto fail_unlock;
49419
49420@@ -640,7 +646,7 @@ close_fail:
49421 filp_close(cprm.file, NULL);
49422 fail_dropcount:
49423 if (ispipe)
49424- atomic_dec(&core_dump_count);
49425+ atomic_dec_unchecked(&core_dump_count);
49426 fail_unlock:
49427 kfree(cn.corename);
49428 fail_corename:
49429@@ -659,7 +665,7 @@ fail:
49430 */
49431 int dump_write(struct file *file, const void *addr, int nr)
49432 {
49433- return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, addr, nr, &file->f_pos) == nr;
49434+ return access_ok(VERIFY_READ, addr, nr) && file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
49435 }
49436 EXPORT_SYMBOL(dump_write);
49437
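
Beyond the unchecked-counter conversions, the coredump hunk above routes the typical crash signals (SIGSEGV, SIGBUS, SIGKILL, SIGILL) into gr_handle_brute_attach() for brute-force detection, and the core_pipe_limit check now rides on the unchecked counter. That limit reduces to take-a-slot-then-back-out; a C11 sketch with illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int core_dump_count;

/* Mirrors the do_coredump() flow: grab a slot, release it on failure. */
static bool under_pipe_limit(int core_pipe_limit)
{
	int dump_count = atomic_fetch_add(&core_dump_count, 1) + 1;

	if (core_pipe_limit && core_pipe_limit < dump_count) {
		atomic_fetch_sub(&core_dump_count, 1);	/* fail_dropcount */
		return false;
	}
	return true;
}
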
49438diff --git a/fs/dcache.c b/fs/dcache.c
49439index 19153a0..428c2f5 100644
49440--- a/fs/dcache.c
49441+++ b/fs/dcache.c
49442@@ -3133,7 +3133,7 @@ void __init vfs_caches_init(unsigned long mempages)
49443 mempages -= reserve;
49444
49445 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
49446- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
49447+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
49448
49449 dcache_init();
49450 inode_init();
49451diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
49452index a5f12b7..4ee8a6f 100644
49453--- a/fs/debugfs/inode.c
49454+++ b/fs/debugfs/inode.c
49455@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
49456 */
49457 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
49458 {
49459+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
49460+ return __create_file(name, S_IFDIR | S_IRWXU,
49461+#else
49462 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
49463+#endif
49464 parent, NULL, NULL);
49465 }
49466 EXPORT_SYMBOL_GPL(debugfs_create_dir);
49467diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
49468index cc7709e..7e7211f 100644
49469--- a/fs/ecryptfs/inode.c
49470+++ b/fs/ecryptfs/inode.c
49471@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
49472 old_fs = get_fs();
49473 set_fs(get_ds());
49474 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
49475- (char __user *)lower_buf,
49476+ (char __force_user *)lower_buf,
49477 PATH_MAX);
49478 set_fs(old_fs);
49479 if (rc < 0)
49480@@ -706,7 +706,7 @@ out:
49481 static void
49482 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
49483 {
49484- char *buf = nd_get_link(nd);
49485+ const char *buf = nd_get_link(nd);
49486 if (!IS_ERR(buf)) {
49487 /* Free the char* */
49488 kfree(buf);
49489diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
49490index 412e6ed..4292d22 100644
49491--- a/fs/ecryptfs/miscdev.c
49492+++ b/fs/ecryptfs/miscdev.c
49493@@ -315,7 +315,7 @@ check_list:
49494 goto out_unlock_msg_ctx;
49495 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
49496 if (msg_ctx->msg) {
49497- if (copy_to_user(&buf[i], packet_length, packet_length_size))
49498+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
49499 goto out_unlock_msg_ctx;
49500 i += packet_length_size;
49501 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
49502diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
49503index b2a34a1..162fa69 100644
49504--- a/fs/ecryptfs/read_write.c
49505+++ b/fs/ecryptfs/read_write.c
49506@@ -48,7 +48,7 @@ int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
49507 return -EIO;
49508 fs_save = get_fs();
49509 set_fs(get_ds());
49510- rc = vfs_write(lower_file, data, size, &offset);
49511+ rc = vfs_write(lower_file, (const char __force_user *)data, size, &offset);
49512 set_fs(fs_save);
49513 mark_inode_dirty_sync(ecryptfs_inode);
49514 return rc;
49515@@ -244,7 +244,7 @@ int ecryptfs_read_lower(char *data, loff_t offset, size_t size,
49516 return -EIO;
49517 fs_save = get_fs();
49518 set_fs(get_ds());
49519- rc = vfs_read(lower_file, data, size, &offset);
49520+ rc = vfs_read(lower_file, (char __force_user *)data, size, &offset);
49521 set_fs(fs_save);
49522 return rc;
49523 }
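
The vfs_read()/vfs_write() casts above, like the earlier ones in fs/compat.c and fs/ecryptfs/inode.c, annotate the classic set_fs(get_ds()) trick for static checking: once the address limit covers kernel space, handing a kernel buffer to a __user parameter is deliberate, and __force_user records that. A rough sketch of how such sparse annotations are commonly defined (the kernel's actual headers are not reproduced here):

/* Under sparse (__CHECKER__) the attributes carry meaning; under a
 * normal compiler they expand to nothing, so this builds as plain C. */
#ifdef __CHECKER__
# define __user        __attribute__((noderef, address_space(1)))
# define __force_user  __attribute__((force)) __user
#else
# define __user
# define __force_user
#endif

static long vfs_read_demo(char __user *buf, unsigned long count)
{
	(void)buf;	/* a real implementation would copy through buf */
	return (long)count;
}

static long kernel_read_demo(void *kbuf, unsigned long count)
{
	/* set_fs(KERNEL_DS) makes this safe at runtime; the cast
	 * documents the intent for the checker. */
	return vfs_read_demo((char __force_user *)kbuf, count);
}
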
49524diff --git a/fs/exec.c b/fs/exec.c
49525index 20df02c..9b8f78d 100644
49526--- a/fs/exec.c
49527+++ b/fs/exec.c
49528@@ -55,6 +55,17 @@
49529 #include <linux/pipe_fs_i.h>
49530 #include <linux/oom.h>
49531 #include <linux/compat.h>
49532+#include <linux/random.h>
49533+#include <linux/seq_file.h>
49534+#include <linux/coredump.h>
49535+#include <linux/mman.h>
49536+
49537+#ifdef CONFIG_PAX_REFCOUNT
49538+#include <linux/kallsyms.h>
49539+#include <linux/kdebug.h>
49540+#endif
49541+
49542+#include <trace/events/fs.h>
49543
49544 #include <asm/uaccess.h>
49545 #include <asm/mmu_context.h>
49546@@ -66,6 +77,18 @@
49547
49548 #include <trace/events/sched.h>
49549
49550+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
49551+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
49552+{
49553+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
49554+}
49555+#endif
49556+
49557+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
49558+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
49559+EXPORT_SYMBOL(pax_set_initial_flags_func);
49560+#endif
49561+
49562 int suid_dumpable = 0;
49563
49564 static LIST_HEAD(formats);
49565@@ -75,8 +98,8 @@ void __register_binfmt(struct linux_binfmt * fmt, int insert)
49566 {
49567 BUG_ON(!fmt);
49568 write_lock(&binfmt_lock);
49569- insert ? list_add(&fmt->lh, &formats) :
49570- list_add_tail(&fmt->lh, &formats);
49571+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
49572+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
49573 write_unlock(&binfmt_lock);
49574 }
49575
49576@@ -85,7 +108,7 @@ EXPORT_SYMBOL(__register_binfmt);
49577 void unregister_binfmt(struct linux_binfmt * fmt)
49578 {
49579 write_lock(&binfmt_lock);
49580- list_del(&fmt->lh);
49581+ pax_list_del((struct list_head *)&fmt->lh);
49582 write_unlock(&binfmt_lock);
49583 }
49584
49585@@ -180,18 +203,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
49586 int write)
49587 {
49588 struct page *page;
49589- int ret;
49590
49591-#ifdef CONFIG_STACK_GROWSUP
49592- if (write) {
49593- ret = expand_downwards(bprm->vma, pos);
49594- if (ret < 0)
49595- return NULL;
49596- }
49597-#endif
49598- ret = get_user_pages(current, bprm->mm, pos,
49599- 1, write, 1, &page, NULL);
49600- if (ret <= 0)
49601+ if (0 > expand_downwards(bprm->vma, pos))
49602+ return NULL;
49603+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
49604 return NULL;
49605
49606 if (write) {
49607@@ -207,6 +222,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
49608 if (size <= ARG_MAX)
49609 return page;
49610
49611+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49612+ // only allow 512KB for argv+env on suid/sgid binaries
49613+ // to prevent easy ASLR exhaustion
49614+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
49615+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
49616+ (size > (512 * 1024))) {
49617+ put_page(page);
49618+ return NULL;
49619+ }
49620+#endif
49621+
49622 /*
49623 * Limit to 1/4-th the stack size for the argv+env strings.
49624 * This ensures that:
49625@@ -266,6 +292,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
49626 vma->vm_end = STACK_TOP_MAX;
49627 vma->vm_start = vma->vm_end - PAGE_SIZE;
49628 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
49629+
49630+#ifdef CONFIG_PAX_SEGMEXEC
49631+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
49632+#endif
49633+
49634 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
49635 INIT_LIST_HEAD(&vma->anon_vma_chain);
49636
49637@@ -276,6 +307,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
49638 mm->stack_vm = mm->total_vm = 1;
49639 up_write(&mm->mmap_sem);
49640 bprm->p = vma->vm_end - sizeof(void *);
49641+
49642+#ifdef CONFIG_PAX_RANDUSTACK
49643+ if (randomize_va_space)
49644+ bprm->p ^= random32() & ~PAGE_MASK;
49645+#endif
49646+
49647 return 0;
49648 err:
49649 up_write(&mm->mmap_sem);
49650@@ -384,19 +421,7 @@ err:
49651 return err;
49652 }
49653
49654-struct user_arg_ptr {
49655-#ifdef CONFIG_COMPAT
49656- bool is_compat;
49657-#endif
49658- union {
49659- const char __user *const __user *native;
49660-#ifdef CONFIG_COMPAT
49661- const compat_uptr_t __user *compat;
49662-#endif
49663- } ptr;
49664-};
49665-
49666-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
49667+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
49668 {
49669 const char __user *native;
49670
49671@@ -405,14 +430,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
49672 compat_uptr_t compat;
49673
49674 if (get_user(compat, argv.ptr.compat + nr))
49675- return ERR_PTR(-EFAULT);
49676+ return (const char __force_user *)ERR_PTR(-EFAULT);
49677
49678 return compat_ptr(compat);
49679 }
49680 #endif
49681
49682 if (get_user(native, argv.ptr.native + nr))
49683- return ERR_PTR(-EFAULT);
49684+ return (const char __force_user *)ERR_PTR(-EFAULT);
49685
49686 return native;
49687 }
49688@@ -431,7 +456,7 @@ static int count(struct user_arg_ptr argv, int max)
49689 if (!p)
49690 break;
49691
49692- if (IS_ERR(p))
49693+ if (IS_ERR((const char __force_kernel *)p))
49694 return -EFAULT;
49695
49696 if (i >= max)
49697@@ -466,7 +491,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
49698
49699 ret = -EFAULT;
49700 str = get_user_arg_ptr(argv, argc);
49701- if (IS_ERR(str))
49702+ if (IS_ERR((const char __force_kernel *)str))
49703 goto out;
49704
49705 len = strnlen_user(str, MAX_ARG_STRLEN);
49706@@ -548,7 +573,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
49707 int r;
49708 mm_segment_t oldfs = get_fs();
49709 struct user_arg_ptr argv = {
49710- .ptr.native = (const char __user *const __user *)__argv,
49711+ .ptr.native = (const char __force_user *const __force_user *)__argv,
49712 };
49713
49714 set_fs(KERNEL_DS);
49715@@ -583,7 +608,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
49716 unsigned long new_end = old_end - shift;
49717 struct mmu_gather tlb;
49718
49719- BUG_ON(new_start > new_end);
49720+ if (new_start >= new_end || new_start < mmap_min_addr)
49721+ return -ENOMEM;
49722
49723 /*
49724 * ensure there are no vmas between where we want to go
49725@@ -592,6 +618,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
49726 if (vma != find_vma(mm, new_start))
49727 return -EFAULT;
49728
49729+#ifdef CONFIG_PAX_SEGMEXEC
49730+ BUG_ON(pax_find_mirror_vma(vma));
49731+#endif
49732+
49733 /*
49734 * cover the whole range: [new_start, old_end)
49735 */
49736@@ -672,10 +702,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
49737 stack_top = arch_align_stack(stack_top);
49738 stack_top = PAGE_ALIGN(stack_top);
49739
49740- if (unlikely(stack_top < mmap_min_addr) ||
49741- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
49742- return -ENOMEM;
49743-
49744 stack_shift = vma->vm_end - stack_top;
49745
49746 bprm->p -= stack_shift;
49747@@ -687,8 +713,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
49748 bprm->exec -= stack_shift;
49749
49750 down_write(&mm->mmap_sem);
49751+
49752+ /* Move stack pages down in memory. */
49753+ if (stack_shift) {
49754+ ret = shift_arg_pages(vma, stack_shift);
49755+ if (ret)
49756+ goto out_unlock;
49757+ }
49758+
49759 vm_flags = VM_STACK_FLAGS;
49760
49761+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49762+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
49763+ vm_flags &= ~VM_EXEC;
49764+
49765+#ifdef CONFIG_PAX_MPROTECT
49766+ if (mm->pax_flags & MF_PAX_MPROTECT)
49767+ vm_flags &= ~VM_MAYEXEC;
49768+#endif
49769+
49770+ }
49771+#endif
49772+
49773 /*
49774 * Adjust stack execute permissions; explicitly enable for
49775 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
49776@@ -707,13 +753,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
49777 goto out_unlock;
49778 BUG_ON(prev != vma);
49779
49780- /* Move stack pages down in memory. */
49781- if (stack_shift) {
49782- ret = shift_arg_pages(vma, stack_shift);
49783- if (ret)
49784- goto out_unlock;
49785- }
49786-
49787 /* mprotect_fixup is overkill to remove the temporary stack flags */
49788 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
49789
49790@@ -737,6 +776,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
49791 #endif
49792 current->mm->start_stack = bprm->p;
49793 ret = expand_stack(vma, stack_base);
49794+
49795+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_ASLR)
49796+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
49797+ unsigned long size, flags, vm_flags;
49798+
49799+ size = STACK_TOP - vma->vm_end;
49800+ flags = MAP_FIXED | MAP_PRIVATE;
49801+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
49802+
49803+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, flags, vm_flags, 0);
49804+
49805+#ifdef CONFIG_X86
49806+ if (!ret) {
49807+ size = mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
49808+ ret = 0 != mmap_region(NULL, 0, size, flags, vm_flags, 0);
49809+ }
49810+#endif
49811+
49812+ }
49813+#endif
49814+
49815 if (ret)
49816 ret = -EFAULT;
49817
49818@@ -772,6 +832,8 @@ struct file *open_exec(const char *name)
49819
49820 fsnotify_open(file);
49821
49822+ trace_open_exec(name);
49823+
49824 err = deny_write_access(file);
49825 if (err)
49826 goto exit;
49827@@ -795,7 +857,7 @@ int kernel_read(struct file *file, loff_t offset,
49828 old_fs = get_fs();
49829 set_fs(get_ds());
49830 /* The cast to a user pointer is valid due to the set_fs() */
49831- result = vfs_read(file, (void __user *)addr, count, &pos);
49832+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
49833 set_fs(old_fs);
49834 return result;
49835 }
49836@@ -1247,7 +1309,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
49837 }
49838 rcu_read_unlock();
49839
49840- if (p->fs->users > n_fs) {
49841+ if (atomic_read(&p->fs->users) > n_fs) {
49842 bprm->unsafe |= LSM_UNSAFE_SHARE;
49843 } else {
49844 res = -EAGAIN;
49845@@ -1447,6 +1509,28 @@ int search_binary_handler(struct linux_binprm *bprm)
49846
49847 EXPORT_SYMBOL(search_binary_handler);
49848
49849+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49850+static DEFINE_PER_CPU(u64, exec_counter);
49851+static int __init init_exec_counters(void)
49852+{
49853+ unsigned int cpu;
49854+
49855+ for_each_possible_cpu(cpu) {
49856+ per_cpu(exec_counter, cpu) = (u64)cpu;
49857+ }
49858+
49859+ return 0;
49860+}
49861+early_initcall(init_exec_counters);
49862+static inline void increment_exec_counter(void)
49863+{
49864+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
49865+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
49866+}
49867+#else
49868+static inline void increment_exec_counter(void) {}
49869+#endif
49870+
49871 /*
49872 * sys_execve() executes a new program.
49873 */
49874@@ -1454,6 +1538,11 @@ static int do_execve_common(const char *filename,
49875 struct user_arg_ptr argv,
49876 struct user_arg_ptr envp)
49877 {
49878+#ifdef CONFIG_GRKERNSEC
49879+ struct file *old_exec_file;
49880+ struct acl_subject_label *old_acl;
49881+ struct rlimit old_rlim[RLIM_NLIMITS];
49882+#endif
49883 struct linux_binprm *bprm;
49884 struct file *file;
49885 struct files_struct *displaced;
49886@@ -1461,6 +1550,8 @@ static int do_execve_common(const char *filename,
49887 int retval;
49888 const struct cred *cred = current_cred();
49889
49890+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
49891+
49892 /*
49893 * We move the actual failure in case of RLIMIT_NPROC excess from
49894 * set*uid() to execve() because too many poorly written programs
49895@@ -1501,12 +1592,27 @@ static int do_execve_common(const char *filename,
49896 if (IS_ERR(file))
49897 goto out_unmark;
49898
49899+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
49900+ retval = -EPERM;
49901+ goto out_file;
49902+ }
49903+
49904 sched_exec();
49905
49906 bprm->file = file;
49907 bprm->filename = filename;
49908 bprm->interp = filename;
49909
49910+ if (gr_process_user_ban()) {
49911+ retval = -EPERM;
49912+ goto out_file;
49913+ }
49914+
49915+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
49916+ retval = -EACCES;
49917+ goto out_file;
49918+ }
49919+
49920 retval = bprm_mm_init(bprm);
49921 if (retval)
49922 goto out_file;
49923@@ -1523,24 +1629,65 @@ static int do_execve_common(const char *filename,
49924 if (retval < 0)
49925 goto out;
49926
49927+#ifdef CONFIG_GRKERNSEC
49928+ old_acl = current->acl;
49929+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
49930+ old_exec_file = current->exec_file;
49931+ get_file(file);
49932+ current->exec_file = file;
49933+#endif
49934+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
49935+ /* limit suid stack to 8MB
49936+ * we saved the old limits above and will restore them if this exec fails
49937+ */
49938+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
49939+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
49940+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
49941+#endif
49942+
49943+ if (!gr_tpe_allow(file)) {
49944+ retval = -EACCES;
49945+ goto out_fail;
49946+ }
49947+
49948+ if (gr_check_crash_exec(file)) {
49949+ retval = -EACCES;
49950+ goto out_fail;
49951+ }
49952+
49953+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
49954+ bprm->unsafe);
49955+ if (retval < 0)
49956+ goto out_fail;
49957+
49958 retval = copy_strings_kernel(1, &bprm->filename, bprm);
49959 if (retval < 0)
49960- goto out;
49961+ goto out_fail;
49962
49963 bprm->exec = bprm->p;
49964 retval = copy_strings(bprm->envc, envp, bprm);
49965 if (retval < 0)
49966- goto out;
49967+ goto out_fail;
49968
49969 retval = copy_strings(bprm->argc, argv, bprm);
49970 if (retval < 0)
49971- goto out;
49972+ goto out_fail;
49973+
49974+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
49975+
49976+ gr_handle_exec_args(bprm, argv);
49977
49978 retval = search_binary_handler(bprm);
49979 if (retval < 0)
49980- goto out;
49981+ goto out_fail;
49982+#ifdef CONFIG_GRKERNSEC
49983+ if (old_exec_file)
49984+ fput(old_exec_file);
49985+#endif
49986
49987 /* execve succeeded */
49988+
49989+ increment_exec_counter();
49990 current->fs->in_exec = 0;
49991 current->in_execve = 0;
49992 acct_update_integrals(current);
49993@@ -1549,6 +1696,14 @@ static int do_execve_common(const char *filename,
49994 put_files_struct(displaced);
49995 return retval;
49996
49997+out_fail:
49998+#ifdef CONFIG_GRKERNSEC
49999+ current->acl = old_acl;
50000+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
50001+ fput(current->exec_file);
50002+ current->exec_file = old_exec_file;
50003+#endif
50004+
50005 out:
50006 if (bprm->mm) {
50007 acct_arg_size(bprm, 0);
50008@@ -1697,3 +1852,253 @@ asmlinkage long compat_sys_execve(const char __user * filename,
50009 return error;
50010 }
50011 #endif
50012+
50013+int pax_check_flags(unsigned long *flags)
50014+{
50015+ int retval = 0;
50016+
50017+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
50018+ if (*flags & MF_PAX_SEGMEXEC)
50019+ {
50020+ *flags &= ~MF_PAX_SEGMEXEC;
50021+ retval = -EINVAL;
50022+ }
50023+#endif
50024+
50025+ if ((*flags & MF_PAX_PAGEEXEC)
50026+
50027+#ifdef CONFIG_PAX_PAGEEXEC
50028+ && (*flags & MF_PAX_SEGMEXEC)
50029+#endif
50030+
50031+ )
50032+ {
50033+ *flags &= ~MF_PAX_PAGEEXEC;
50034+ retval = -EINVAL;
50035+ }
50036+
50037+ if ((*flags & MF_PAX_MPROTECT)
50038+
50039+#ifdef CONFIG_PAX_MPROTECT
50040+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
50041+#endif
50042+
50043+ )
50044+ {
50045+ *flags &= ~MF_PAX_MPROTECT;
50046+ retval = -EINVAL;
50047+ }
50048+
50049+ if ((*flags & MF_PAX_EMUTRAMP)
50050+
50051+#ifdef CONFIG_PAX_EMUTRAMP
50052+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
50053+#endif
50054+
50055+ )
50056+ {
50057+ *flags &= ~MF_PAX_EMUTRAMP;
50058+ retval = -EINVAL;
50059+ }
50060+
50061+ return retval;
50062+}
50063+
50064+EXPORT_SYMBOL(pax_check_flags);
50065+
50066+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
50067+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
50068+{
50069+ struct task_struct *tsk = current;
50070+ struct mm_struct *mm = current->mm;
50071+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
50072+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
50073+ char *path_exec = NULL;
50074+ char *path_fault = NULL;
50075+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
50076+ siginfo_t info = { };
50077+
50078+ if (buffer_exec && buffer_fault) {
50079+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
50080+
50081+ down_read(&mm->mmap_sem);
50082+ vma = mm->mmap;
50083+ while (vma && (!vma_exec || !vma_fault)) {
50084+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
50085+ vma_exec = vma;
50086+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
50087+ vma_fault = vma;
50088+ vma = vma->vm_next;
50089+ }
50090+ if (vma_exec) {
50091+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
50092+ if (IS_ERR(path_exec))
50093+ path_exec = "<path too long>";
50094+ else {
50095+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
50096+ if (path_exec) {
50097+ *path_exec = 0;
50098+ path_exec = buffer_exec;
50099+ } else
50100+ path_exec = "<path too long>";
50101+ }
50102+ }
50103+ if (vma_fault) {
50104+ start = vma_fault->vm_start;
50105+ end = vma_fault->vm_end;
50106+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
50107+ if (vma_fault->vm_file) {
50108+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
50109+ if (IS_ERR(path_fault))
50110+ path_fault = "<path too long>";
50111+ else {
50112+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
50113+ if (path_fault) {
50114+ *path_fault = 0;
50115+ path_fault = buffer_fault;
50116+ } else
50117+ path_fault = "<path too long>";
50118+ }
50119+ } else
50120+ path_fault = "<anonymous mapping>";
50121+ }
50122+ up_read(&mm->mmap_sem);
50123+ }
50124+ if (tsk->signal->curr_ip)
50125+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
50126+ else
50127+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
50128+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
50129+ from_kuid(&init_user_ns, task_uid(tsk)), from_kuid(&init_user_ns, task_euid(tsk)), pc, sp);
50130+ free_page((unsigned long)buffer_exec);
50131+ free_page((unsigned long)buffer_fault);
50132+ pax_report_insns(regs, pc, sp);
50133+ info.si_signo = SIGKILL;
50134+ info.si_errno = 0;
50135+ info.si_code = SI_KERNEL;
50136+ info.si_pid = 0;
50137+ info.si_uid = 0;
50138+ do_coredump(&info);
50139+}
50140+#endif
50141+
50142+#ifdef CONFIG_PAX_REFCOUNT
50143+void pax_report_refcount_overflow(struct pt_regs *regs)
50144+{
50145+ if (current->signal->curr_ip)
50146+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
50147+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
50148+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
50149+ else
50150+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
50151+ from_kuid(&init_user_ns, current_uid()), from_kuid(&init_user_ns, current_euid()));
50152+ print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
50153+ show_regs(regs);
50154+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
50155+}
50156+#endif
50157+
50158+#ifdef CONFIG_PAX_USERCOPY
50159+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
50160+static noinline int check_stack_object(const void *obj, unsigned long len)
50161+{
50162+ const void * const stack = task_stack_page(current);
50163+ const void * const stackend = stack + THREAD_SIZE;
50164+
50165+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
50166+ const void *frame = NULL;
50167+ const void *oldframe;
50168+#endif
50169+
50170+ if (obj + len < obj)
50171+ return -1;
50172+
50173+ if (obj + len <= stack || stackend <= obj)
50174+ return 0;
50175+
50176+ if (obj < stack || stackend < obj + len)
50177+ return -1;
50178+
50179+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
50180+ oldframe = __builtin_frame_address(1);
50181+ if (oldframe)
50182+ frame = __builtin_frame_address(2);
50183+ /*
50184+ low ----------------------------------------------> high
50185+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
50186+ ^----------------^
50187+ allow copies only within here
50188+ */
50189+ while (stack <= frame && frame < stackend) {
50190+ /* if obj + len extends past the last frame, this
50191+ check won't pass and the next frame will be 0,
50192+ causing us to bail out and correctly report
50193+ the copy as invalid
50194+ */
50195+ if (obj + len <= frame)
50196+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
50197+ oldframe = frame;
50198+ frame = *(const void * const *)frame;
50199+ }
50200+ return -1;
50201+#else
50202+ return 1;
50203+#endif
50204+}
50205+
50206+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
50207+{
50208+ if (current->signal->curr_ip)
50209+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
50210+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
50211+ else
50212+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
50213+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
50214+ dump_stack();
50215+ gr_handle_kernel_exploit();
50216+ do_group_exit(SIGKILL);
50217+}
50218+#endif
50219+
50220+void __check_object_size(const void *ptr, unsigned long n, bool to)
50221+{
50222+
50223+#ifdef CONFIG_PAX_USERCOPY
50224+ const char *type;
50225+
50226+ if (!n)
50227+ return;
50228+
50229+ type = check_heap_object(ptr, n);
50230+ if (!type) {
50231+ if (check_stack_object(ptr, n) != -1)
50232+ return;
50233+ type = "<process stack>";
50234+ }
50235+
50236+ pax_report_usercopy(ptr, n, to, type);
50237+#endif
50238+
50239+}
50240+EXPORT_SYMBOL(__check_object_size);
50241+
50242+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
50243+void pax_track_stack(void)
50244+{
50245+ unsigned long sp = (unsigned long)&sp;
50246+ if (sp < current_thread_info()->lowest_stack &&
50247+ sp > (unsigned long)task_stack_page(current))
50248+ current_thread_info()->lowest_stack = sp;
50249+}
50250+EXPORT_SYMBOL(pax_track_stack);
50251+#endif
50252+
50253+#ifdef CONFIG_PAX_SIZE_OVERFLOW
50254+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
50255+{
50256+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s\n", func, file, line, ssa_name);
50257+ dump_stack();
50258+ do_group_exit(SIGKILL);
50259+}
50260+EXPORT_SYMBOL(report_size_overflow);
50261+#endif
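
In the fs/exec.c additions above, check_stack_object() first classifies the candidate copy range against the task stack before any frame walking: fully outside means heap rules apply, fully inside may be refined frame by frame, and anything straddling a boundary is rejected outright. The interval logic restated as a standalone sketch:

/* 0: not a stack object, 1: fully inside [stack, stackend),
 * -1: wraps around or straddles a boundary (always an error). */
static int classify_range(const char *obj, unsigned long len,
			  const char *stack, const char *stackend)
{
	if (obj + len < obj)			/* pointer wraparound */
		return -1;
	if (obj + len <= stack || stackend <= obj)
		return 0;			/* entirely off-stack */
	if (obj < stack || stackend < obj + len)
		return -1;			/* partial overlap */
	return 1;				/* fully contained */
}

The frame walk then tightens the fully-inside case on x86 with frame pointers: the copy must also sit within some live frame, above its saved frame pointer and return address, which is what the low-to-high diagram in the hunk depicts.
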
50262diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
50263index 2616d0e..2ffdec9 100644
50264--- a/fs/ext2/balloc.c
50265+++ b/fs/ext2/balloc.c
50266@@ -1190,10 +1190,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
50267
50268 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
50269 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
50270- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
50271+ if (free_blocks < root_blocks + 1 &&
50272 !uid_eq(sbi->s_resuid, current_fsuid()) &&
50273 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
50274- !in_group_p (sbi->s_resgid))) {
50275+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
50276 return 0;
50277 }
50278 return 1;
50279diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
50280index 22548f5..41521d8 100644
50281--- a/fs/ext3/balloc.c
50282+++ b/fs/ext3/balloc.c
50283@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
50284
50285 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
50286 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
50287- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
50288+ if (free_blocks < root_blocks + 1 &&
50289 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
50290 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
50291- !in_group_p (sbi->s_resgid))) {
50292+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
50293 return 0;
50294 }
50295 return 1;
50296diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
50297index 2f2e0da..89b113a 100644
50298--- a/fs/ext4/balloc.c
50299+++ b/fs/ext4/balloc.c
50300@@ -505,8 +505,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
50301 /* Hm, nope. Are (enough) root reserved clusters available? */
50302 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
50303 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
50304- capable(CAP_SYS_RESOURCE) ||
50305- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
50306+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
50307+ capable_nolog(CAP_SYS_RESOURCE)) {
50308
50309 if (free_clusters >= (nclusters + dirty_clusters))
50310 return 1;
50311diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
50312index 8462eb3..4a71af6 100644
50313--- a/fs/ext4/ext4.h
50314+++ b/fs/ext4/ext4.h
50315@@ -1265,19 +1265,19 @@ struct ext4_sb_info {
50316 unsigned long s_mb_last_start;
50317
50318 /* stats for buddy allocator */
50319- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
50320- atomic_t s_bal_success; /* we found long enough chunks */
50321- atomic_t s_bal_allocated; /* in blocks */
50322- atomic_t s_bal_ex_scanned; /* total extents scanned */
50323- atomic_t s_bal_goals; /* goal hits */
50324- atomic_t s_bal_breaks; /* too long searches */
50325- atomic_t s_bal_2orders; /* 2^order hits */
50326+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
50327+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
50328+ atomic_unchecked_t s_bal_allocated; /* in blocks */
50329+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
50330+ atomic_unchecked_t s_bal_goals; /* goal hits */
50331+ atomic_unchecked_t s_bal_breaks; /* too long searches */
50332+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
50333 spinlock_t s_bal_lock;
50334 unsigned long s_mb_buddies_generated;
50335 unsigned long long s_mb_generation_time;
50336- atomic_t s_mb_lost_chunks;
50337- atomic_t s_mb_preallocated;
50338- atomic_t s_mb_discarded;
50339+ atomic_unchecked_t s_mb_lost_chunks;
50340+ atomic_unchecked_t s_mb_preallocated;
50341+ atomic_unchecked_t s_mb_discarded;
50342 atomic_t s_lock_busy;
50343
50344 /* locality groups */
50345diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
50346index 061727a..7622abf 100644
50347--- a/fs/ext4/mballoc.c
50348+++ b/fs/ext4/mballoc.c
50349@@ -1747,7 +1747,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
50350 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
50351
50352 if (EXT4_SB(sb)->s_mb_stats)
50353- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
50354+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
50355
50356 break;
50357 }
50358@@ -2044,7 +2044,7 @@ repeat:
50359 ac->ac_status = AC_STATUS_CONTINUE;
50360 ac->ac_flags |= EXT4_MB_HINT_FIRST;
50361 cr = 3;
50362- atomic_inc(&sbi->s_mb_lost_chunks);
50363+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
50364 goto repeat;
50365 }
50366 }
50367@@ -2552,25 +2552,25 @@ int ext4_mb_release(struct super_block *sb)
50368 if (sbi->s_mb_stats) {
50369 ext4_msg(sb, KERN_INFO,
50370 "mballoc: %u blocks %u reqs (%u success)",
50371- atomic_read(&sbi->s_bal_allocated),
50372- atomic_read(&sbi->s_bal_reqs),
50373- atomic_read(&sbi->s_bal_success));
50374+ atomic_read_unchecked(&sbi->s_bal_allocated),
50375+ atomic_read_unchecked(&sbi->s_bal_reqs),
50376+ atomic_read_unchecked(&sbi->s_bal_success));
50377 ext4_msg(sb, KERN_INFO,
50378 "mballoc: %u extents scanned, %u goal hits, "
50379 "%u 2^N hits, %u breaks, %u lost",
50380- atomic_read(&sbi->s_bal_ex_scanned),
50381- atomic_read(&sbi->s_bal_goals),
50382- atomic_read(&sbi->s_bal_2orders),
50383- atomic_read(&sbi->s_bal_breaks),
50384- atomic_read(&sbi->s_mb_lost_chunks));
50385+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
50386+ atomic_read_unchecked(&sbi->s_bal_goals),
50387+ atomic_read_unchecked(&sbi->s_bal_2orders),
50388+ atomic_read_unchecked(&sbi->s_bal_breaks),
50389+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
50390 ext4_msg(sb, KERN_INFO,
50391 "mballoc: %lu generated and it took %Lu",
50392 sbi->s_mb_buddies_generated,
50393 sbi->s_mb_generation_time);
50394 ext4_msg(sb, KERN_INFO,
50395 "mballoc: %u preallocated, %u discarded",
50396- atomic_read(&sbi->s_mb_preallocated),
50397- atomic_read(&sbi->s_mb_discarded));
50398+ atomic_read_unchecked(&sbi->s_mb_preallocated),
50399+ atomic_read_unchecked(&sbi->s_mb_discarded));
50400 }
50401
50402 free_percpu(sbi->s_locality_groups);
50403@@ -3060,16 +3060,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
50404 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
50405
50406 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
50407- atomic_inc(&sbi->s_bal_reqs);
50408- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
50409+ atomic_inc_unchecked(&sbi->s_bal_reqs);
50410+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
50411 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
50412- atomic_inc(&sbi->s_bal_success);
50413- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
50414+ atomic_inc_unchecked(&sbi->s_bal_success);
50415+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
50416 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
50417 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
50418- atomic_inc(&sbi->s_bal_goals);
50419+ atomic_inc_unchecked(&sbi->s_bal_goals);
50420 if (ac->ac_found > sbi->s_mb_max_to_scan)
50421- atomic_inc(&sbi->s_bal_breaks);
50422+ atomic_inc_unchecked(&sbi->s_bal_breaks);
50423 }
50424
50425 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
50426@@ -3469,7 +3469,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
50427 trace_ext4_mb_new_inode_pa(ac, pa);
50428
50429 ext4_mb_use_inode_pa(ac, pa);
50430- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
50431+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
50432
50433 ei = EXT4_I(ac->ac_inode);
50434 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
50435@@ -3529,7 +3529,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
50436 trace_ext4_mb_new_group_pa(ac, pa);
50437
50438 ext4_mb_use_group_pa(ac, pa);
50439- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
50440+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
50441
50442 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
50443 lg = ac->ac_lg;
50444@@ -3618,7 +3618,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
50445 * from the bitmap and continue.
50446 */
50447 }
50448- atomic_add(free, &sbi->s_mb_discarded);
50449+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
50450
50451 return err;
50452 }
50453@@ -3636,7 +3636,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
50454 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
50455 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
50456 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
50457- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
50458+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
50459 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
50460
50461 return 0;
50462diff --git a/fs/ext4/super.c b/fs/ext4/super.c
50463index 0465f36..99a003a 100644
50464--- a/fs/ext4/super.c
50465+++ b/fs/ext4/super.c
50466@@ -2429,7 +2429,7 @@ struct ext4_attr {
50467 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
50468 const char *, size_t);
50469 int offset;
50470-};
50471+} __do_const;
50472
50473 static int parse_strtoul(const char *buf,
50474 unsigned long max, unsigned long *value)
50475diff --git a/fs/fcntl.c b/fs/fcntl.c
50476index 71a600a..20d87b1 100644
50477--- a/fs/fcntl.c
50478+++ b/fs/fcntl.c
50479@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
50480 if (err)
50481 return err;
50482
50483+ if (gr_handle_chroot_fowner(pid, type))
50484+ return -ENOENT;
50485+ if (gr_check_protected_task_fowner(pid, type))
50486+ return -EACCES;
50487+
50488 f_modown(filp, pid, type, force);
50489 return 0;
50490 }
50491diff --git a/fs/fhandle.c b/fs/fhandle.c
50492index 999ff5c..41f4109 100644
50493--- a/fs/fhandle.c
50494+++ b/fs/fhandle.c
50495@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
50496 } else
50497 retval = 0;
50498 /* copy the mount id */
50499- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
50500- sizeof(*mnt_id)) ||
50501+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
50502 copy_to_user(ufh, handle,
50503 sizeof(struct file_handle) + handle_bytes))
50504 retval = -EFAULT;
50505diff --git a/fs/fifo.c b/fs/fifo.c
50506index cf6f434..3d7942c 100644
50507--- a/fs/fifo.c
50508+++ b/fs/fifo.c
50509@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
50510 */
50511 filp->f_op = &read_pipefifo_fops;
50512 pipe->r_counter++;
50513- if (pipe->readers++ == 0)
50514+ if (atomic_inc_return(&pipe->readers) == 1)
50515 wake_up_partner(inode);
50516
50517- if (!pipe->writers) {
50518+ if (!atomic_read(&pipe->writers)) {
50519 if ((filp->f_flags & O_NONBLOCK)) {
50520 /* suppress POLLHUP until we have
50521 * seen a writer */
50522@@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
50523 * errno=ENXIO when there is no process reading the FIFO.
50524 */
50525 ret = -ENXIO;
50526- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
50527+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
50528 goto err;
50529
50530 filp->f_op = &write_pipefifo_fops;
50531 pipe->w_counter++;
50532- if (!pipe->writers++)
50533+ if (atomic_inc_return(&pipe->writers) == 1)
50534 wake_up_partner(inode);
50535
50536- if (!pipe->readers) {
50537+ if (!atomic_read(&pipe->readers)) {
50538 if (wait_for_partner(inode, &pipe->r_counter))
50539 goto err_wr;
50540 }
50541@@ -104,11 +104,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
50542 */
50543 filp->f_op = &rdwr_pipefifo_fops;
50544
50545- pipe->readers++;
50546- pipe->writers++;
50547+ atomic_inc(&pipe->readers);
50548+ atomic_inc(&pipe->writers);
50549 pipe->r_counter++;
50550 pipe->w_counter++;
50551- if (pipe->readers == 1 || pipe->writers == 1)
50552+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
50553 wake_up_partner(inode);
50554 break;
50555
50556@@ -122,19 +122,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
50557 return 0;
50558
50559 err_rd:
50560- if (!--pipe->readers)
50561+ if (atomic_dec_and_test(&pipe->readers))
50562 wake_up_interruptible(&pipe->wait);
50563 ret = -ERESTARTSYS;
50564 goto err;
50565
50566 err_wr:
50567- if (!--pipe->writers)
50568+ if (atomic_dec_and_test(&pipe->writers))
50569 wake_up_interruptible(&pipe->wait);
50570 ret = -ERESTARTSYS;
50571 goto err;
50572
50573 err:
50574- if (!pipe->readers && !pipe->writers)
50575+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
50576 free_pipe_info(inode);
50577
50578 err_nocleanup:
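
The fifo_open() rewrites above are mechanical once two C idioms are recognized: a post-increment test for the first user and a pre-decrement test for the last. Their atomic equivalents, sketched with C11 atomics under illustrative names:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int readers;

static bool first_reader(void)	/* was: pipe->readers++ == 0 */
{
	return atomic_fetch_add(&readers, 1) == 0;
}

static bool last_reader(void)	/* was: !--pipe->readers, i.e. atomic_dec_and_test */
{
	return atomic_fetch_sub(&readers, 1) == 1;
}

These are the same atomic reader/writer counters that the fs/coredump.c hunk manipulates in wait_for_dump_helpers().
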
50579diff --git a/fs/file.c b/fs/file.c
50580index 2b3570b..c57924b 100644
50581--- a/fs/file.c
50582+++ b/fs/file.c
50583@@ -16,6 +16,7 @@
50584 #include <linux/slab.h>
50585 #include <linux/vmalloc.h>
50586 #include <linux/file.h>
50587+#include <linux/security.h>
50588 #include <linux/fdtable.h>
50589 #include <linux/bitops.h>
50590 #include <linux/interrupt.h>
50591@@ -892,6 +893,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
50592 if (!file)
50593 return __close_fd(files, fd);
50594
50595+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
50596 if (fd >= rlimit(RLIMIT_NOFILE))
50597 return -EBADF;
50598
50599@@ -918,6 +920,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
50600 if (unlikely(oldfd == newfd))
50601 return -EINVAL;
50602
50603+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
50604 if (newfd >= rlimit(RLIMIT_NOFILE))
50605 return -EBADF;
50606
50607@@ -973,6 +976,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
50608 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
50609 {
50610 int err;
50611+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
50612 if (from >= rlimit(RLIMIT_NOFILE))
50613 return -EINVAL;
50614 err = alloc_fd(from, flags);
50615diff --git a/fs/filesystems.c b/fs/filesystems.c
50616index da165f6..3671bdb 100644
50617--- a/fs/filesystems.c
50618+++ b/fs/filesystems.c
50619@@ -273,7 +273,12 @@ struct file_system_type *get_fs_type(const char *name)
50620 int len = dot ? dot - name : strlen(name);
50621
50622 fs = __get_fs_type(name, len);
50623+
50624+#ifdef CONFIG_GRKERNSEC_MODHARDEN
50625+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
50626+#else
50627 if (!fs && (request_module("%.*s", len, name) == 0))
50628+#endif
50629 fs = __get_fs_type(name, len);
50630
50631 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
50632diff --git a/fs/fs_struct.c b/fs/fs_struct.c
50633index fe6ca58..65318cf 100644
50634--- a/fs/fs_struct.c
50635+++ b/fs/fs_struct.c
50636@@ -4,6 +4,7 @@
50637 #include <linux/path.h>
50638 #include <linux/slab.h>
50639 #include <linux/fs_struct.h>
50640+#include <linux/grsecurity.h>
50641 #include "internal.h"
50642
50643 /*
50644@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
50645 write_seqcount_begin(&fs->seq);
50646 old_root = fs->root;
50647 fs->root = *path;
50648+ gr_set_chroot_entries(current, path);
50649 write_seqcount_end(&fs->seq);
50650 spin_unlock(&fs->lock);
50651 if (old_root.dentry)
50652@@ -53,6 +55,21 @@ static inline int replace_path(struct path *p, const struct path *old, const str
50653 return 1;
50654 }
50655
50656+static inline int replace_root_path(struct task_struct *task, struct path *p, const struct path *old, struct path *new)
50657+{
50658+ if (likely(p->dentry != old->dentry || p->mnt != old->mnt))
50659+ return 0;
50660+ *p = *new;
50661+
50662+ /* This function is only called from pivot_root(). Leave our
50663+ gr_chroot_dentry and is_chrooted flags as-is, so that a
50664+ pivoted root isn't treated as a chroot
50665+ */
50666+ //gr_set_chroot_entries(task, new);
50667+
50668+ return 1;
50669+}
50670+
50671 void chroot_fs_refs(struct path *old_root, struct path *new_root)
50672 {
50673 struct task_struct *g, *p;
50674@@ -67,7 +84,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
50675 int hits = 0;
50676 spin_lock(&fs->lock);
50677 write_seqcount_begin(&fs->seq);
50678- hits += replace_path(&fs->root, old_root, new_root);
50679+ hits += replace_root_path(p, &fs->root, old_root, new_root);
50680 hits += replace_path(&fs->pwd, old_root, new_root);
50681 write_seqcount_end(&fs->seq);
50682 while (hits--) {
50683@@ -99,7 +116,8 @@ void exit_fs(struct task_struct *tsk)
50684 task_lock(tsk);
50685 spin_lock(&fs->lock);
50686 tsk->fs = NULL;
50687- kill = !--fs->users;
50688+ gr_clear_chroot_entries(tsk);
50689+ kill = !atomic_dec_return(&fs->users);
50690 spin_unlock(&fs->lock);
50691 task_unlock(tsk);
50692 if (kill)
50693@@ -112,7 +130,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
50694 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
50695 /* We don't need to lock fs - think why ;-) */
50696 if (fs) {
50697- fs->users = 1;
50698+ atomic_set(&fs->users, 1);
50699 fs->in_exec = 0;
50700 spin_lock_init(&fs->lock);
50701 seqcount_init(&fs->seq);
50702@@ -121,6 +139,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
50703 spin_lock(&old->lock);
50704 fs->root = old->root;
50705 path_get(&fs->root);
50706+ /* instead of calling gr_set_chroot_entries here,
50707+ we call it from every caller of this function
50708+ */
50709 fs->pwd = old->pwd;
50710 path_get(&fs->pwd);
50711 spin_unlock(&old->lock);
50712@@ -139,8 +160,9 @@ int unshare_fs_struct(void)
50713
50714 task_lock(current);
50715 spin_lock(&fs->lock);
50716- kill = !--fs->users;
50717+ kill = !atomic_dec_return(&fs->users);
50718 current->fs = new_fs;
50719+ gr_set_chroot_entries(current, &new_fs->root);
50720 spin_unlock(&fs->lock);
50721 task_unlock(current);
50722
50723@@ -153,13 +175,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
50724
50725 int current_umask(void)
50726 {
50727- return current->fs->umask;
50728+ return current->fs->umask | gr_acl_umask();
50729 }
50730 EXPORT_SYMBOL(current_umask);
50731
50732 /* to be mentioned only in INIT_TASK */
50733 struct fs_struct init_fs = {
50734- .users = 1,
50735+ .users = ATOMIC_INIT(1),
50736 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
50737 .seq = SEQCNT_ZERO,
50738 .umask = 0022,
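The fs_struct hunks above make three coordinated changes: set_fs_root() now records every root change in grsecurity's per-task chroot state, while pivot_root() goes through the new replace_root_path() precisely so that a pivoted root is not flagged as a chroot; the plain users count becomes an atomic_t, which under PAX_REFCOUNT also gains overflow detection; and current_umask() ORs in any RBAC-mandated umask bits. A worked example of the umask composition (the policy value is illustrative; gr_acl_umask() is the hook this patch adds):

#include <linux/fs_struct.h>
#include <linux/grsecurity.h>

int current_umask(void)
{
	/* policy bits can only remove permissions, never grant them */
	return current->fs->umask | gr_acl_umask();
}

/* With a task umask of 022 and an RBAC umask of 077:
 *   open("f", O_CREAT, 0666)  =>  0666 & ~(022 | 077)  ==  0600 */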
50739diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
50740index 8dcb114..b1072e2 100644
50741--- a/fs/fscache/cookie.c
50742+++ b/fs/fscache/cookie.c
50743@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
50744 parent ? (char *) parent->def->name : "<no-parent>",
50745 def->name, netfs_data);
50746
50747- fscache_stat(&fscache_n_acquires);
50748+ fscache_stat_unchecked(&fscache_n_acquires);
50749
50750 /* if there's no parent cookie, then we don't create one here either */
50751 if (!parent) {
50752- fscache_stat(&fscache_n_acquires_null);
50753+ fscache_stat_unchecked(&fscache_n_acquires_null);
50754 _leave(" [no parent]");
50755 return NULL;
50756 }
50757@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
50758 /* allocate and initialise a cookie */
50759 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
50760 if (!cookie) {
50761- fscache_stat(&fscache_n_acquires_oom);
50762+ fscache_stat_unchecked(&fscache_n_acquires_oom);
50763 _leave(" [ENOMEM]");
50764 return NULL;
50765 }
50766@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
50767
50768 switch (cookie->def->type) {
50769 case FSCACHE_COOKIE_TYPE_INDEX:
50770- fscache_stat(&fscache_n_cookie_index);
50771+ fscache_stat_unchecked(&fscache_n_cookie_index);
50772 break;
50773 case FSCACHE_COOKIE_TYPE_DATAFILE:
50774- fscache_stat(&fscache_n_cookie_data);
50775+ fscache_stat_unchecked(&fscache_n_cookie_data);
50776 break;
50777 default:
50778- fscache_stat(&fscache_n_cookie_special);
50779+ fscache_stat_unchecked(&fscache_n_cookie_special);
50780 break;
50781 }
50782
50783@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
50784 if (fscache_acquire_non_index_cookie(cookie) < 0) {
50785 atomic_dec(&parent->n_children);
50786 __fscache_cookie_put(cookie);
50787- fscache_stat(&fscache_n_acquires_nobufs);
50788+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
50789 _leave(" = NULL");
50790 return NULL;
50791 }
50792 }
50793
50794- fscache_stat(&fscache_n_acquires_ok);
50795+ fscache_stat_unchecked(&fscache_n_acquires_ok);
50796 _leave(" = %p", cookie);
50797 return cookie;
50798 }
50799@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
50800 cache = fscache_select_cache_for_object(cookie->parent);
50801 if (!cache) {
50802 up_read(&fscache_addremove_sem);
50803- fscache_stat(&fscache_n_acquires_no_cache);
50804+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
50805 _leave(" = -ENOMEDIUM [no cache]");
50806 return -ENOMEDIUM;
50807 }
50808@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
50809 object = cache->ops->alloc_object(cache, cookie);
50810 fscache_stat_d(&fscache_n_cop_alloc_object);
50811 if (IS_ERR(object)) {
50812- fscache_stat(&fscache_n_object_no_alloc);
50813+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
50814 ret = PTR_ERR(object);
50815 goto error;
50816 }
50817
50818- fscache_stat(&fscache_n_object_alloc);
50819+ fscache_stat_unchecked(&fscache_n_object_alloc);
50820
50821 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
50822
50823@@ -378,7 +378,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
50824
50825 _enter("{%s}", cookie->def->name);
50826
50827- fscache_stat(&fscache_n_invalidates);
50828+ fscache_stat_unchecked(&fscache_n_invalidates);
50829
50830 /* Only permit invalidation of data files. Invalidating an index will
50831 * require the caller to release all its attachments to the tree rooted
50832@@ -437,10 +437,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
50833 struct fscache_object *object;
50834 struct hlist_node *_p;
50835
50836- fscache_stat(&fscache_n_updates);
50837+ fscache_stat_unchecked(&fscache_n_updates);
50838
50839 if (!cookie) {
50840- fscache_stat(&fscache_n_updates_null);
50841+ fscache_stat_unchecked(&fscache_n_updates_null);
50842 _leave(" [no cookie]");
50843 return;
50844 }
50845@@ -474,12 +474,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
50846 struct fscache_object *object;
50847 unsigned long event;
50848
50849- fscache_stat(&fscache_n_relinquishes);
50850+ fscache_stat_unchecked(&fscache_n_relinquishes);
50851 if (retire)
50852- fscache_stat(&fscache_n_relinquishes_retire);
50853+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
50854
50855 if (!cookie) {
50856- fscache_stat(&fscache_n_relinquishes_null);
50857+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
50858 _leave(" [no cookie]");
50859 return;
50860 }
50861@@ -495,7 +495,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
50862
50863 /* wait for the cookie to finish being instantiated (or to fail) */
50864 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
50865- fscache_stat(&fscache_n_relinquishes_waitcrt);
50866+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
50867 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
50868 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
50869 }
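Every fscache_stat() call in cookie.c above becomes fscache_stat_unchecked(). These counters are monotonically growing statistics that never drive lifetime decisions, so the patch moves them out from under PAX_REFCOUNT's overflow trap: a checked counter would trip after 2^31 increments on a long-running machine for no security gain. The distinct type behind the conversion, simplified from the PaX core headers in this patch:

#ifdef CONFIG_PAX_REFCOUNT
typedef struct {
	int counter;
} atomic_unchecked_t;	/* a separate struct type, so checked and
			 * unchecked counters cannot be mixed silently */
#else
typedef atomic_t atomic_unchecked_t;	/* without REFCOUNT they coincide */
#endif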
50870diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
50871index ee38fef..0a326d4 100644
50872--- a/fs/fscache/internal.h
50873+++ b/fs/fscache/internal.h
50874@@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
50875 * stats.c
50876 */
50877 #ifdef CONFIG_FSCACHE_STATS
50878-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
50879-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
50880+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
50881+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
50882
50883-extern atomic_t fscache_n_op_pend;
50884-extern atomic_t fscache_n_op_run;
50885-extern atomic_t fscache_n_op_enqueue;
50886-extern atomic_t fscache_n_op_deferred_release;
50887-extern atomic_t fscache_n_op_release;
50888-extern atomic_t fscache_n_op_gc;
50889-extern atomic_t fscache_n_op_cancelled;
50890-extern atomic_t fscache_n_op_rejected;
50891+extern atomic_unchecked_t fscache_n_op_pend;
50892+extern atomic_unchecked_t fscache_n_op_run;
50893+extern atomic_unchecked_t fscache_n_op_enqueue;
50894+extern atomic_unchecked_t fscache_n_op_deferred_release;
50895+extern atomic_unchecked_t fscache_n_op_release;
50896+extern atomic_unchecked_t fscache_n_op_gc;
50897+extern atomic_unchecked_t fscache_n_op_cancelled;
50898+extern atomic_unchecked_t fscache_n_op_rejected;
50899
50900-extern atomic_t fscache_n_attr_changed;
50901-extern atomic_t fscache_n_attr_changed_ok;
50902-extern atomic_t fscache_n_attr_changed_nobufs;
50903-extern atomic_t fscache_n_attr_changed_nomem;
50904-extern atomic_t fscache_n_attr_changed_calls;
50905+extern atomic_unchecked_t fscache_n_attr_changed;
50906+extern atomic_unchecked_t fscache_n_attr_changed_ok;
50907+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
50908+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
50909+extern atomic_unchecked_t fscache_n_attr_changed_calls;
50910
50911-extern atomic_t fscache_n_allocs;
50912-extern atomic_t fscache_n_allocs_ok;
50913-extern atomic_t fscache_n_allocs_wait;
50914-extern atomic_t fscache_n_allocs_nobufs;
50915-extern atomic_t fscache_n_allocs_intr;
50916-extern atomic_t fscache_n_allocs_object_dead;
50917-extern atomic_t fscache_n_alloc_ops;
50918-extern atomic_t fscache_n_alloc_op_waits;
50919+extern atomic_unchecked_t fscache_n_allocs;
50920+extern atomic_unchecked_t fscache_n_allocs_ok;
50921+extern atomic_unchecked_t fscache_n_allocs_wait;
50922+extern atomic_unchecked_t fscache_n_allocs_nobufs;
50923+extern atomic_unchecked_t fscache_n_allocs_intr;
50924+extern atomic_unchecked_t fscache_n_allocs_object_dead;
50925+extern atomic_unchecked_t fscache_n_alloc_ops;
50926+extern atomic_unchecked_t fscache_n_alloc_op_waits;
50927
50928-extern atomic_t fscache_n_retrievals;
50929-extern atomic_t fscache_n_retrievals_ok;
50930-extern atomic_t fscache_n_retrievals_wait;
50931-extern atomic_t fscache_n_retrievals_nodata;
50932-extern atomic_t fscache_n_retrievals_nobufs;
50933-extern atomic_t fscache_n_retrievals_intr;
50934-extern atomic_t fscache_n_retrievals_nomem;
50935-extern atomic_t fscache_n_retrievals_object_dead;
50936-extern atomic_t fscache_n_retrieval_ops;
50937-extern atomic_t fscache_n_retrieval_op_waits;
50938+extern atomic_unchecked_t fscache_n_retrievals;
50939+extern atomic_unchecked_t fscache_n_retrievals_ok;
50940+extern atomic_unchecked_t fscache_n_retrievals_wait;
50941+extern atomic_unchecked_t fscache_n_retrievals_nodata;
50942+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
50943+extern atomic_unchecked_t fscache_n_retrievals_intr;
50944+extern atomic_unchecked_t fscache_n_retrievals_nomem;
50945+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
50946+extern atomic_unchecked_t fscache_n_retrieval_ops;
50947+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
50948
50949-extern atomic_t fscache_n_stores;
50950-extern atomic_t fscache_n_stores_ok;
50951-extern atomic_t fscache_n_stores_again;
50952-extern atomic_t fscache_n_stores_nobufs;
50953-extern atomic_t fscache_n_stores_oom;
50954-extern atomic_t fscache_n_store_ops;
50955-extern atomic_t fscache_n_store_calls;
50956-extern atomic_t fscache_n_store_pages;
50957-extern atomic_t fscache_n_store_radix_deletes;
50958-extern atomic_t fscache_n_store_pages_over_limit;
50959+extern atomic_unchecked_t fscache_n_stores;
50960+extern atomic_unchecked_t fscache_n_stores_ok;
50961+extern atomic_unchecked_t fscache_n_stores_again;
50962+extern atomic_unchecked_t fscache_n_stores_nobufs;
50963+extern atomic_unchecked_t fscache_n_stores_oom;
50964+extern atomic_unchecked_t fscache_n_store_ops;
50965+extern atomic_unchecked_t fscache_n_store_calls;
50966+extern atomic_unchecked_t fscache_n_store_pages;
50967+extern atomic_unchecked_t fscache_n_store_radix_deletes;
50968+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
50969
50970-extern atomic_t fscache_n_store_vmscan_not_storing;
50971-extern atomic_t fscache_n_store_vmscan_gone;
50972-extern atomic_t fscache_n_store_vmscan_busy;
50973-extern atomic_t fscache_n_store_vmscan_cancelled;
50974-extern atomic_t fscache_n_store_vmscan_wait;
50975+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50976+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
50977+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
50978+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50979+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
50980
50981-extern atomic_t fscache_n_marks;
50982-extern atomic_t fscache_n_uncaches;
50983+extern atomic_unchecked_t fscache_n_marks;
50984+extern atomic_unchecked_t fscache_n_uncaches;
50985
50986-extern atomic_t fscache_n_acquires;
50987-extern atomic_t fscache_n_acquires_null;
50988-extern atomic_t fscache_n_acquires_no_cache;
50989-extern atomic_t fscache_n_acquires_ok;
50990-extern atomic_t fscache_n_acquires_nobufs;
50991-extern atomic_t fscache_n_acquires_oom;
50992+extern atomic_unchecked_t fscache_n_acquires;
50993+extern atomic_unchecked_t fscache_n_acquires_null;
50994+extern atomic_unchecked_t fscache_n_acquires_no_cache;
50995+extern atomic_unchecked_t fscache_n_acquires_ok;
50996+extern atomic_unchecked_t fscache_n_acquires_nobufs;
50997+extern atomic_unchecked_t fscache_n_acquires_oom;
50998
50999-extern atomic_t fscache_n_invalidates;
51000-extern atomic_t fscache_n_invalidates_run;
51001+extern atomic_unchecked_t fscache_n_invalidates;
51002+extern atomic_unchecked_t fscache_n_invalidates_run;
51003
51004-extern atomic_t fscache_n_updates;
51005-extern atomic_t fscache_n_updates_null;
51006-extern atomic_t fscache_n_updates_run;
51007+extern atomic_unchecked_t fscache_n_updates;
51008+extern atomic_unchecked_t fscache_n_updates_null;
51009+extern atomic_unchecked_t fscache_n_updates_run;
51010
51011-extern atomic_t fscache_n_relinquishes;
51012-extern atomic_t fscache_n_relinquishes_null;
51013-extern atomic_t fscache_n_relinquishes_waitcrt;
51014-extern atomic_t fscache_n_relinquishes_retire;
51015+extern atomic_unchecked_t fscache_n_relinquishes;
51016+extern atomic_unchecked_t fscache_n_relinquishes_null;
51017+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
51018+extern atomic_unchecked_t fscache_n_relinquishes_retire;
51019
51020-extern atomic_t fscache_n_cookie_index;
51021-extern atomic_t fscache_n_cookie_data;
51022-extern atomic_t fscache_n_cookie_special;
51023+extern atomic_unchecked_t fscache_n_cookie_index;
51024+extern atomic_unchecked_t fscache_n_cookie_data;
51025+extern atomic_unchecked_t fscache_n_cookie_special;
51026
51027-extern atomic_t fscache_n_object_alloc;
51028-extern atomic_t fscache_n_object_no_alloc;
51029-extern atomic_t fscache_n_object_lookups;
51030-extern atomic_t fscache_n_object_lookups_negative;
51031-extern atomic_t fscache_n_object_lookups_positive;
51032-extern atomic_t fscache_n_object_lookups_timed_out;
51033-extern atomic_t fscache_n_object_created;
51034-extern atomic_t fscache_n_object_avail;
51035-extern atomic_t fscache_n_object_dead;
51036+extern atomic_unchecked_t fscache_n_object_alloc;
51037+extern atomic_unchecked_t fscache_n_object_no_alloc;
51038+extern atomic_unchecked_t fscache_n_object_lookups;
51039+extern atomic_unchecked_t fscache_n_object_lookups_negative;
51040+extern atomic_unchecked_t fscache_n_object_lookups_positive;
51041+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
51042+extern atomic_unchecked_t fscache_n_object_created;
51043+extern atomic_unchecked_t fscache_n_object_avail;
51044+extern atomic_unchecked_t fscache_n_object_dead;
51045
51046-extern atomic_t fscache_n_checkaux_none;
51047-extern atomic_t fscache_n_checkaux_okay;
51048-extern atomic_t fscache_n_checkaux_update;
51049-extern atomic_t fscache_n_checkaux_obsolete;
51050+extern atomic_unchecked_t fscache_n_checkaux_none;
51051+extern atomic_unchecked_t fscache_n_checkaux_okay;
51052+extern atomic_unchecked_t fscache_n_checkaux_update;
51053+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
51054
51055 extern atomic_t fscache_n_cop_alloc_object;
51056 extern atomic_t fscache_n_cop_lookup_object;
51057@@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
51058 atomic_inc(stat);
51059 }
51060
51061+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
51062+{
51063+ atomic_inc_unchecked(stat);
51064+}
51065+
51066 static inline void fscache_stat_d(atomic_t *stat)
51067 {
51068 atomic_dec(stat);
51069@@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
51070
51071 #define __fscache_stat(stat) (NULL)
51072 #define fscache_stat(stat) do {} while (0)
51073+#define fscache_stat_unchecked(stat) do {} while (0)
51074 #define fscache_stat_d(stat) do {} while (0)
51075 #endif
51076
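Two details of the internal.h hunk are easy to miss: for !CONFIG_FSCACHE_STATS builds it adds a matching "#define fscache_stat_unchecked(stat) do {} while (0)" stub so the converted call sites compile either way, and because atomic_unchecked_t is a distinct type, any mix-up between checked and unchecked counters is caught at build time rather than silently changing overflow semantics. An illustrative demonstration (not code from the patch):

static atomic_t object_refs;		/* lifetime count: keep the trap */
static atomic_unchecked_t cache_hits;	/* statistic: wrapping is harmless */

static void record_hit(void)
{
	atomic_inc(&object_refs);		/* ok: checked op, checked type */
	atomic_inc_unchecked(&cache_hits);	/* ok: unchecked op and type */
	/* atomic_inc(&cache_hits);  -- incompatible pointer type,
	 * flagged by the compiler */
}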
51077diff --git a/fs/fscache/object.c b/fs/fscache/object.c
51078index 50d41c1..10ee117 100644
51079--- a/fs/fscache/object.c
51080+++ b/fs/fscache/object.c
51081@@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
51082 /* Invalidate an object on disk */
51083 case FSCACHE_OBJECT_INVALIDATING:
51084 clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
51085- fscache_stat(&fscache_n_invalidates_run);
51086+ fscache_stat_unchecked(&fscache_n_invalidates_run);
51087 fscache_stat(&fscache_n_cop_invalidate_object);
51088 fscache_invalidate_object(object);
51089 fscache_stat_d(&fscache_n_cop_invalidate_object);
51090@@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
51091 /* update the object metadata on disk */
51092 case FSCACHE_OBJECT_UPDATING:
51093 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
51094- fscache_stat(&fscache_n_updates_run);
51095+ fscache_stat_unchecked(&fscache_n_updates_run);
51096 fscache_stat(&fscache_n_cop_update_object);
51097 object->cache->ops->update_object(object);
51098 fscache_stat_d(&fscache_n_cop_update_object);
51099@@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
51100 spin_lock(&object->lock);
51101 object->state = FSCACHE_OBJECT_DEAD;
51102 spin_unlock(&object->lock);
51103- fscache_stat(&fscache_n_object_dead);
51104+ fscache_stat_unchecked(&fscache_n_object_dead);
51105 goto terminal_transit;
51106
51107 /* handle the parent cache of this object being withdrawn from
51108@@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
51109 spin_lock(&object->lock);
51110 object->state = FSCACHE_OBJECT_DEAD;
51111 spin_unlock(&object->lock);
51112- fscache_stat(&fscache_n_object_dead);
51113+ fscache_stat_unchecked(&fscache_n_object_dead);
51114 goto terminal_transit;
51115
51116 /* complain about the object being woken up once it is
51117@@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
51118 parent->cookie->def->name, cookie->def->name,
51119 object->cache->tag->name);
51120
51121- fscache_stat(&fscache_n_object_lookups);
51122+ fscache_stat_unchecked(&fscache_n_object_lookups);
51123 fscache_stat(&fscache_n_cop_lookup_object);
51124 ret = object->cache->ops->lookup_object(object);
51125 fscache_stat_d(&fscache_n_cop_lookup_object);
51126@@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
51127 if (ret == -ETIMEDOUT) {
51128 /* probably stuck behind another object, so move this one to
51129 * the back of the queue */
51130- fscache_stat(&fscache_n_object_lookups_timed_out);
51131+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
51132 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
51133 }
51134
51135@@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
51136
51137 spin_lock(&object->lock);
51138 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
51139- fscache_stat(&fscache_n_object_lookups_negative);
51140+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
51141
51142 /* transit here to allow write requests to begin stacking up
51143 * and read requests to begin returning ENODATA */
51144@@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
51145 * result, in which case there may be data available */
51146 spin_lock(&object->lock);
51147 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
51148- fscache_stat(&fscache_n_object_lookups_positive);
51149+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
51150
51151 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
51152
51153@@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
51154 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
51155 } else {
51156 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
51157- fscache_stat(&fscache_n_object_created);
51158+ fscache_stat_unchecked(&fscache_n_object_created);
51159
51160 object->state = FSCACHE_OBJECT_AVAILABLE;
51161 spin_unlock(&object->lock);
51162@@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
51163 fscache_enqueue_dependents(object);
51164
51165 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
51166- fscache_stat(&fscache_n_object_avail);
51167+ fscache_stat_unchecked(&fscache_n_object_avail);
51168
51169 _leave("");
51170 }
51171@@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
51172 enum fscache_checkaux result;
51173
51174 if (!object->cookie->def->check_aux) {
51175- fscache_stat(&fscache_n_checkaux_none);
51176+ fscache_stat_unchecked(&fscache_n_checkaux_none);
51177 return FSCACHE_CHECKAUX_OKAY;
51178 }
51179
51180@@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
51181 switch (result) {
51182 /* entry okay as is */
51183 case FSCACHE_CHECKAUX_OKAY:
51184- fscache_stat(&fscache_n_checkaux_okay);
51185+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
51186 break;
51187
51188 /* entry requires update */
51189 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
51190- fscache_stat(&fscache_n_checkaux_update);
51191+ fscache_stat_unchecked(&fscache_n_checkaux_update);
51192 break;
51193
51194 /* entry requires deletion */
51195 case FSCACHE_CHECKAUX_OBSOLETE:
51196- fscache_stat(&fscache_n_checkaux_obsolete);
51197+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
51198 break;
51199
51200 default:
51201diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
51202index 762a9ec..2023284 100644
51203--- a/fs/fscache/operation.c
51204+++ b/fs/fscache/operation.c
51205@@ -17,7 +17,7 @@
51206 #include <linux/slab.h>
51207 #include "internal.h"
51208
51209-atomic_t fscache_op_debug_id;
51210+atomic_unchecked_t fscache_op_debug_id;
51211 EXPORT_SYMBOL(fscache_op_debug_id);
51212
51213 /**
51214@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
51215 ASSERTCMP(atomic_read(&op->usage), >, 0);
51216 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
51217
51218- fscache_stat(&fscache_n_op_enqueue);
51219+ fscache_stat_unchecked(&fscache_n_op_enqueue);
51220 switch (op->flags & FSCACHE_OP_TYPE) {
51221 case FSCACHE_OP_ASYNC:
51222 _debug("queue async");
51223@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
51224 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
51225 if (op->processor)
51226 fscache_enqueue_operation(op);
51227- fscache_stat(&fscache_n_op_run);
51228+ fscache_stat_unchecked(&fscache_n_op_run);
51229 }
51230
51231 /*
51232@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
51233 if (object->n_in_progress > 0) {
51234 atomic_inc(&op->usage);
51235 list_add_tail(&op->pend_link, &object->pending_ops);
51236- fscache_stat(&fscache_n_op_pend);
51237+ fscache_stat_unchecked(&fscache_n_op_pend);
51238 } else if (!list_empty(&object->pending_ops)) {
51239 atomic_inc(&op->usage);
51240 list_add_tail(&op->pend_link, &object->pending_ops);
51241- fscache_stat(&fscache_n_op_pend);
51242+ fscache_stat_unchecked(&fscache_n_op_pend);
51243 fscache_start_operations(object);
51244 } else {
51245 ASSERTCMP(object->n_in_progress, ==, 0);
51246@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
51247 object->n_exclusive++; /* reads and writes must wait */
51248 atomic_inc(&op->usage);
51249 list_add_tail(&op->pend_link, &object->pending_ops);
51250- fscache_stat(&fscache_n_op_pend);
51251+ fscache_stat_unchecked(&fscache_n_op_pend);
51252 ret = 0;
51253 } else {
51254 /* If we're in any other state, there must have been an I/O
51255@@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
51256 if (object->n_exclusive > 0) {
51257 atomic_inc(&op->usage);
51258 list_add_tail(&op->pend_link, &object->pending_ops);
51259- fscache_stat(&fscache_n_op_pend);
51260+ fscache_stat_unchecked(&fscache_n_op_pend);
51261 } else if (!list_empty(&object->pending_ops)) {
51262 atomic_inc(&op->usage);
51263 list_add_tail(&op->pend_link, &object->pending_ops);
51264- fscache_stat(&fscache_n_op_pend);
51265+ fscache_stat_unchecked(&fscache_n_op_pend);
51266 fscache_start_operations(object);
51267 } else {
51268 ASSERTCMP(object->n_exclusive, ==, 0);
51269@@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
51270 object->n_ops++;
51271 atomic_inc(&op->usage);
51272 list_add_tail(&op->pend_link, &object->pending_ops);
51273- fscache_stat(&fscache_n_op_pend);
51274+ fscache_stat_unchecked(&fscache_n_op_pend);
51275 ret = 0;
51276 } else if (object->state == FSCACHE_OBJECT_DYING ||
51277 object->state == FSCACHE_OBJECT_LC_DYING ||
51278 object->state == FSCACHE_OBJECT_WITHDRAWING) {
51279- fscache_stat(&fscache_n_op_rejected);
51280+ fscache_stat_unchecked(&fscache_n_op_rejected);
51281 op->state = FSCACHE_OP_ST_CANCELLED;
51282 ret = -ENOBUFS;
51283 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
51284@@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
51285 ret = -EBUSY;
51286 if (op->state == FSCACHE_OP_ST_PENDING) {
51287 ASSERT(!list_empty(&op->pend_link));
51288- fscache_stat(&fscache_n_op_cancelled);
51289+ fscache_stat_unchecked(&fscache_n_op_cancelled);
51290 list_del_init(&op->pend_link);
51291 if (do_cancel)
51292 do_cancel(op);
51293@@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
51294 while (!list_empty(&object->pending_ops)) {
51295 op = list_entry(object->pending_ops.next,
51296 struct fscache_operation, pend_link);
51297- fscache_stat(&fscache_n_op_cancelled);
51298+ fscache_stat_unchecked(&fscache_n_op_cancelled);
51299 list_del_init(&op->pend_link);
51300
51301 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
51302@@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
51303 op->state, ==, FSCACHE_OP_ST_CANCELLED);
51304 op->state = FSCACHE_OP_ST_DEAD;
51305
51306- fscache_stat(&fscache_n_op_release);
51307+ fscache_stat_unchecked(&fscache_n_op_release);
51308
51309 if (op->release) {
51310 op->release(op);
51311@@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
51312 * lock, and defer it otherwise */
51313 if (!spin_trylock(&object->lock)) {
51314 _debug("defer put");
51315- fscache_stat(&fscache_n_op_deferred_release);
51316+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
51317
51318 cache = object->cache;
51319 spin_lock(&cache->op_gc_list_lock);
51320@@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
51321
51322 _debug("GC DEFERRED REL OBJ%x OP%x",
51323 object->debug_id, op->debug_id);
51324- fscache_stat(&fscache_n_op_gc);
51325+ fscache_stat_unchecked(&fscache_n_op_gc);
51326
51327 ASSERTCMP(atomic_read(&op->usage), ==, 0);
51328 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
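fscache_op_debug_id above is a global tag generator: each operation takes the next value for trace output and nothing ever decrements it, so wrapping after 2^32 operations is cosmetic and the counter is converted along with the statistics. A sketch of the allocation idiom; fscache_new_debug_id() is a hypothetical wrapper, since the real call sites (e.g. in the page.c hunk below) inline the increment:

extern atomic_unchecked_t fscache_op_debug_id;

static inline unsigned int fscache_new_debug_id(void)
{
	/* post-increment value; only trace readability depends on it,
	 * so silent wraparound is acceptable */
	return atomic_inc_return_unchecked(&fscache_op_debug_id);
}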
51329diff --git a/fs/fscache/page.c b/fs/fscache/page.c
51330index ff000e5..c44ec6d 100644
51331--- a/fs/fscache/page.c
51332+++ b/fs/fscache/page.c
51333@@ -61,7 +61,7 @@ try_again:
51334 val = radix_tree_lookup(&cookie->stores, page->index);
51335 if (!val) {
51336 rcu_read_unlock();
51337- fscache_stat(&fscache_n_store_vmscan_not_storing);
51338+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
51339 __fscache_uncache_page(cookie, page);
51340 return true;
51341 }
51342@@ -91,11 +91,11 @@ try_again:
51343 spin_unlock(&cookie->stores_lock);
51344
51345 if (xpage) {
51346- fscache_stat(&fscache_n_store_vmscan_cancelled);
51347- fscache_stat(&fscache_n_store_radix_deletes);
51348+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
51349+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
51350 ASSERTCMP(xpage, ==, page);
51351 } else {
51352- fscache_stat(&fscache_n_store_vmscan_gone);
51353+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
51354 }
51355
51356 wake_up_bit(&cookie->flags, 0);
51357@@ -110,11 +110,11 @@ page_busy:
51358 * sleeping on memory allocation, so we may need to impose a timeout
51359 * too. */
51360 if (!(gfp & __GFP_WAIT)) {
51361- fscache_stat(&fscache_n_store_vmscan_busy);
51362+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
51363 return false;
51364 }
51365
51366- fscache_stat(&fscache_n_store_vmscan_wait);
51367+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
51368 __fscache_wait_on_page_write(cookie, page);
51369 gfp &= ~__GFP_WAIT;
51370 goto try_again;
51371@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
51372 FSCACHE_COOKIE_STORING_TAG);
51373 if (!radix_tree_tag_get(&cookie->stores, page->index,
51374 FSCACHE_COOKIE_PENDING_TAG)) {
51375- fscache_stat(&fscache_n_store_radix_deletes);
51376+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
51377 xpage = radix_tree_delete(&cookie->stores, page->index);
51378 }
51379 spin_unlock(&cookie->stores_lock);
51380@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
51381
51382 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
51383
51384- fscache_stat(&fscache_n_attr_changed_calls);
51385+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
51386
51387 if (fscache_object_is_active(object)) {
51388 fscache_stat(&fscache_n_cop_attr_changed);
51389@@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
51390
51391 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
51392
51393- fscache_stat(&fscache_n_attr_changed);
51394+ fscache_stat_unchecked(&fscache_n_attr_changed);
51395
51396 op = kzalloc(sizeof(*op), GFP_KERNEL);
51397 if (!op) {
51398- fscache_stat(&fscache_n_attr_changed_nomem);
51399+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
51400 _leave(" = -ENOMEM");
51401 return -ENOMEM;
51402 }
51403@@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
51404 if (fscache_submit_exclusive_op(object, op) < 0)
51405 goto nobufs;
51406 spin_unlock(&cookie->lock);
51407- fscache_stat(&fscache_n_attr_changed_ok);
51408+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
51409 fscache_put_operation(op);
51410 _leave(" = 0");
51411 return 0;
51412@@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
51413 nobufs:
51414 spin_unlock(&cookie->lock);
51415 kfree(op);
51416- fscache_stat(&fscache_n_attr_changed_nobufs);
51417+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
51418 _leave(" = %d", -ENOBUFS);
51419 return -ENOBUFS;
51420 }
51421@@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
51422 /* allocate a retrieval operation and attempt to submit it */
51423 op = kzalloc(sizeof(*op), GFP_NOIO);
51424 if (!op) {
51425- fscache_stat(&fscache_n_retrievals_nomem);
51426+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
51427 return NULL;
51428 }
51429
51430@@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
51431 return 0;
51432 }
51433
51434- fscache_stat(&fscache_n_retrievals_wait);
51435+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
51436
51437 jif = jiffies;
51438 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
51439 fscache_wait_bit_interruptible,
51440 TASK_INTERRUPTIBLE) != 0) {
51441- fscache_stat(&fscache_n_retrievals_intr);
51442+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
51443 _leave(" = -ERESTARTSYS");
51444 return -ERESTARTSYS;
51445 }
51446@@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
51447 */
51448 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
51449 struct fscache_retrieval *op,
51450- atomic_t *stat_op_waits,
51451- atomic_t *stat_object_dead)
51452+ atomic_unchecked_t *stat_op_waits,
51453+ atomic_unchecked_t *stat_object_dead)
51454 {
51455 int ret;
51456
51457@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
51458 goto check_if_dead;
51459
51460 _debug(">>> WT");
51461- fscache_stat(stat_op_waits);
51462+ fscache_stat_unchecked(stat_op_waits);
51463 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
51464 fscache_wait_bit_interruptible,
51465 TASK_INTERRUPTIBLE) != 0) {
51466@@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
51467
51468 check_if_dead:
51469 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
51470- fscache_stat(stat_object_dead);
51471+ fscache_stat_unchecked(stat_object_dead);
51472 _leave(" = -ENOBUFS [cancelled]");
51473 return -ENOBUFS;
51474 }
51475 if (unlikely(fscache_object_is_dead(object))) {
51476 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
51477 fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
51478- fscache_stat(stat_object_dead);
51479+ fscache_stat_unchecked(stat_object_dead);
51480 return -ENOBUFS;
51481 }
51482 return 0;
51483@@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
51484
51485 _enter("%p,%p,,,", cookie, page);
51486
51487- fscache_stat(&fscache_n_retrievals);
51488+ fscache_stat_unchecked(&fscache_n_retrievals);
51489
51490 if (hlist_empty(&cookie->backing_objects))
51491 goto nobufs;
51492@@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
51493 goto nobufs_unlock_dec;
51494 spin_unlock(&cookie->lock);
51495
51496- fscache_stat(&fscache_n_retrieval_ops);
51497+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
51498
51499 /* pin the netfs read context in case we need to do the actual netfs
51500 * read because we've encountered a cache read failure */
51501@@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
51502
51503 error:
51504 if (ret == -ENOMEM)
51505- fscache_stat(&fscache_n_retrievals_nomem);
51506+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
51507 else if (ret == -ERESTARTSYS)
51508- fscache_stat(&fscache_n_retrievals_intr);
51509+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
51510 else if (ret == -ENODATA)
51511- fscache_stat(&fscache_n_retrievals_nodata);
51512+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
51513 else if (ret < 0)
51514- fscache_stat(&fscache_n_retrievals_nobufs);
51515+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
51516 else
51517- fscache_stat(&fscache_n_retrievals_ok);
51518+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
51519
51520 fscache_put_retrieval(op);
51521 _leave(" = %d", ret);
51522@@ -467,7 +467,7 @@ nobufs_unlock:
51523 spin_unlock(&cookie->lock);
51524 kfree(op);
51525 nobufs:
51526- fscache_stat(&fscache_n_retrievals_nobufs);
51527+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
51528 _leave(" = -ENOBUFS");
51529 return -ENOBUFS;
51530 }
51531@@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
51532
51533 _enter("%p,,%d,,,", cookie, *nr_pages);
51534
51535- fscache_stat(&fscache_n_retrievals);
51536+ fscache_stat_unchecked(&fscache_n_retrievals);
51537
51538 if (hlist_empty(&cookie->backing_objects))
51539 goto nobufs;
51540@@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
51541 goto nobufs_unlock_dec;
51542 spin_unlock(&cookie->lock);
51543
51544- fscache_stat(&fscache_n_retrieval_ops);
51545+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
51546
51547 /* pin the netfs read context in case we need to do the actual netfs
51548 * read because we've encountered a cache read failure */
51549@@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
51550
51551 error:
51552 if (ret == -ENOMEM)
51553- fscache_stat(&fscache_n_retrievals_nomem);
51554+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
51555 else if (ret == -ERESTARTSYS)
51556- fscache_stat(&fscache_n_retrievals_intr);
51557+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
51558 else if (ret == -ENODATA)
51559- fscache_stat(&fscache_n_retrievals_nodata);
51560+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
51561 else if (ret < 0)
51562- fscache_stat(&fscache_n_retrievals_nobufs);
51563+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
51564 else
51565- fscache_stat(&fscache_n_retrievals_ok);
51566+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
51567
51568 fscache_put_retrieval(op);
51569 _leave(" = %d", ret);
51570@@ -591,7 +591,7 @@ nobufs_unlock:
51571 spin_unlock(&cookie->lock);
51572 kfree(op);
51573 nobufs:
51574- fscache_stat(&fscache_n_retrievals_nobufs);
51575+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
51576 _leave(" = -ENOBUFS");
51577 return -ENOBUFS;
51578 }
51579@@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
51580
51581 _enter("%p,%p,,,", cookie, page);
51582
51583- fscache_stat(&fscache_n_allocs);
51584+ fscache_stat_unchecked(&fscache_n_allocs);
51585
51586 if (hlist_empty(&cookie->backing_objects))
51587 goto nobufs;
51588@@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
51589 goto nobufs_unlock;
51590 spin_unlock(&cookie->lock);
51591
51592- fscache_stat(&fscache_n_alloc_ops);
51593+ fscache_stat_unchecked(&fscache_n_alloc_ops);
51594
51595 ret = fscache_wait_for_retrieval_activation(
51596 object, op,
51597@@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
51598
51599 error:
51600 if (ret == -ERESTARTSYS)
51601- fscache_stat(&fscache_n_allocs_intr);
51602+ fscache_stat_unchecked(&fscache_n_allocs_intr);
51603 else if (ret < 0)
51604- fscache_stat(&fscache_n_allocs_nobufs);
51605+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
51606 else
51607- fscache_stat(&fscache_n_allocs_ok);
51608+ fscache_stat_unchecked(&fscache_n_allocs_ok);
51609
51610 fscache_put_retrieval(op);
51611 _leave(" = %d", ret);
51612@@ -677,7 +677,7 @@ nobufs_unlock:
51613 spin_unlock(&cookie->lock);
51614 kfree(op);
51615 nobufs:
51616- fscache_stat(&fscache_n_allocs_nobufs);
51617+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
51618 _leave(" = -ENOBUFS");
51619 return -ENOBUFS;
51620 }
51621@@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
51622
51623 spin_lock(&cookie->stores_lock);
51624
51625- fscache_stat(&fscache_n_store_calls);
51626+ fscache_stat_unchecked(&fscache_n_store_calls);
51627
51628 /* find a page to store */
51629 page = NULL;
51630@@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
51631 page = results[0];
51632 _debug("gang %d [%lx]", n, page->index);
51633 if (page->index > op->store_limit) {
51634- fscache_stat(&fscache_n_store_pages_over_limit);
51635+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
51636 goto superseded;
51637 }
51638
51639@@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
51640 spin_unlock(&cookie->stores_lock);
51641 spin_unlock(&object->lock);
51642
51643- fscache_stat(&fscache_n_store_pages);
51644+ fscache_stat_unchecked(&fscache_n_store_pages);
51645 fscache_stat(&fscache_n_cop_write_page);
51646 ret = object->cache->ops->write_page(op, page);
51647 fscache_stat_d(&fscache_n_cop_write_page);
51648@@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
51649 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
51650 ASSERT(PageFsCache(page));
51651
51652- fscache_stat(&fscache_n_stores);
51653+ fscache_stat_unchecked(&fscache_n_stores);
51654
51655 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
51656 _leave(" = -ENOBUFS [invalidating]");
51657@@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
51658 spin_unlock(&cookie->stores_lock);
51659 spin_unlock(&object->lock);
51660
51661- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
51662+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
51663 op->store_limit = object->store_limit;
51664
51665 if (fscache_submit_op(object, &op->op) < 0)
51666@@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
51667
51668 spin_unlock(&cookie->lock);
51669 radix_tree_preload_end();
51670- fscache_stat(&fscache_n_store_ops);
51671- fscache_stat(&fscache_n_stores_ok);
51672+ fscache_stat_unchecked(&fscache_n_store_ops);
51673+ fscache_stat_unchecked(&fscache_n_stores_ok);
51674
51675 /* the work queue now carries its own ref on the object */
51676 fscache_put_operation(&op->op);
51677@@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
51678 return 0;
51679
51680 already_queued:
51681- fscache_stat(&fscache_n_stores_again);
51682+ fscache_stat_unchecked(&fscache_n_stores_again);
51683 already_pending:
51684 spin_unlock(&cookie->stores_lock);
51685 spin_unlock(&object->lock);
51686 spin_unlock(&cookie->lock);
51687 radix_tree_preload_end();
51688 kfree(op);
51689- fscache_stat(&fscache_n_stores_ok);
51690+ fscache_stat_unchecked(&fscache_n_stores_ok);
51691 _leave(" = 0");
51692 return 0;
51693
51694@@ -959,14 +959,14 @@ nobufs:
51695 spin_unlock(&cookie->lock);
51696 radix_tree_preload_end();
51697 kfree(op);
51698- fscache_stat(&fscache_n_stores_nobufs);
51699+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
51700 _leave(" = -ENOBUFS");
51701 return -ENOBUFS;
51702
51703 nomem_free:
51704 kfree(op);
51705 nomem:
51706- fscache_stat(&fscache_n_stores_oom);
51707+ fscache_stat_unchecked(&fscache_n_stores_oom);
51708 _leave(" = -ENOMEM");
51709 return -ENOMEM;
51710 }
51711@@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
51712 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
51713 ASSERTCMP(page, !=, NULL);
51714
51715- fscache_stat(&fscache_n_uncaches);
51716+ fscache_stat_unchecked(&fscache_n_uncaches);
51717
51718 /* cache withdrawal may beat us to it */
51719 if (!PageFsCache(page))
51720@@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
51721 struct fscache_cookie *cookie = op->op.object->cookie;
51722
51723 #ifdef CONFIG_FSCACHE_STATS
51724- atomic_inc(&fscache_n_marks);
51725+ atomic_inc_unchecked(&fscache_n_marks);
51726 #endif
51727
51728 _debug("- mark %p{%lx}", page, page->index);
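Note how the conversion propagates through signatures in page.c: fscache_wait_for_retrieval_activation() now takes its two stat pointers as atomic_unchecked_t *, so callers must hand it converted counters; a checked atomic_t * no longer type-checks. Condensed from the call shape in __fscache_read_or_alloc_page() (the __fscache_stat() macro passes the pointer through, or NULL when stats are compiled out):

ret = fscache_wait_for_retrieval_activation(
	object, op,
	__fscache_stat(&fscache_n_retrieval_op_waits),	  /* atomic_unchecked_t * */
	__fscache_stat(&fscache_n_retrievals_object_dead));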
51729diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
51730index 8179e8b..5072cc7 100644
51731--- a/fs/fscache/stats.c
51732+++ b/fs/fscache/stats.c
51733@@ -18,99 +18,99 @@
51734 /*
51735 * operation counters
51736 */
51737-atomic_t fscache_n_op_pend;
51738-atomic_t fscache_n_op_run;
51739-atomic_t fscache_n_op_enqueue;
51740-atomic_t fscache_n_op_requeue;
51741-atomic_t fscache_n_op_deferred_release;
51742-atomic_t fscache_n_op_release;
51743-atomic_t fscache_n_op_gc;
51744-atomic_t fscache_n_op_cancelled;
51745-atomic_t fscache_n_op_rejected;
51746+atomic_unchecked_t fscache_n_op_pend;
51747+atomic_unchecked_t fscache_n_op_run;
51748+atomic_unchecked_t fscache_n_op_enqueue;
51749+atomic_unchecked_t fscache_n_op_requeue;
51750+atomic_unchecked_t fscache_n_op_deferred_release;
51751+atomic_unchecked_t fscache_n_op_release;
51752+atomic_unchecked_t fscache_n_op_gc;
51753+atomic_unchecked_t fscache_n_op_cancelled;
51754+atomic_unchecked_t fscache_n_op_rejected;
51755
51756-atomic_t fscache_n_attr_changed;
51757-atomic_t fscache_n_attr_changed_ok;
51758-atomic_t fscache_n_attr_changed_nobufs;
51759-atomic_t fscache_n_attr_changed_nomem;
51760-atomic_t fscache_n_attr_changed_calls;
51761+atomic_unchecked_t fscache_n_attr_changed;
51762+atomic_unchecked_t fscache_n_attr_changed_ok;
51763+atomic_unchecked_t fscache_n_attr_changed_nobufs;
51764+atomic_unchecked_t fscache_n_attr_changed_nomem;
51765+atomic_unchecked_t fscache_n_attr_changed_calls;
51766
51767-atomic_t fscache_n_allocs;
51768-atomic_t fscache_n_allocs_ok;
51769-atomic_t fscache_n_allocs_wait;
51770-atomic_t fscache_n_allocs_nobufs;
51771-atomic_t fscache_n_allocs_intr;
51772-atomic_t fscache_n_allocs_object_dead;
51773-atomic_t fscache_n_alloc_ops;
51774-atomic_t fscache_n_alloc_op_waits;
51775+atomic_unchecked_t fscache_n_allocs;
51776+atomic_unchecked_t fscache_n_allocs_ok;
51777+atomic_unchecked_t fscache_n_allocs_wait;
51778+atomic_unchecked_t fscache_n_allocs_nobufs;
51779+atomic_unchecked_t fscache_n_allocs_intr;
51780+atomic_unchecked_t fscache_n_allocs_object_dead;
51781+atomic_unchecked_t fscache_n_alloc_ops;
51782+atomic_unchecked_t fscache_n_alloc_op_waits;
51783
51784-atomic_t fscache_n_retrievals;
51785-atomic_t fscache_n_retrievals_ok;
51786-atomic_t fscache_n_retrievals_wait;
51787-atomic_t fscache_n_retrievals_nodata;
51788-atomic_t fscache_n_retrievals_nobufs;
51789-atomic_t fscache_n_retrievals_intr;
51790-atomic_t fscache_n_retrievals_nomem;
51791-atomic_t fscache_n_retrievals_object_dead;
51792-atomic_t fscache_n_retrieval_ops;
51793-atomic_t fscache_n_retrieval_op_waits;
51794+atomic_unchecked_t fscache_n_retrievals;
51795+atomic_unchecked_t fscache_n_retrievals_ok;
51796+atomic_unchecked_t fscache_n_retrievals_wait;
51797+atomic_unchecked_t fscache_n_retrievals_nodata;
51798+atomic_unchecked_t fscache_n_retrievals_nobufs;
51799+atomic_unchecked_t fscache_n_retrievals_intr;
51800+atomic_unchecked_t fscache_n_retrievals_nomem;
51801+atomic_unchecked_t fscache_n_retrievals_object_dead;
51802+atomic_unchecked_t fscache_n_retrieval_ops;
51803+atomic_unchecked_t fscache_n_retrieval_op_waits;
51804
51805-atomic_t fscache_n_stores;
51806-atomic_t fscache_n_stores_ok;
51807-atomic_t fscache_n_stores_again;
51808-atomic_t fscache_n_stores_nobufs;
51809-atomic_t fscache_n_stores_oom;
51810-atomic_t fscache_n_store_ops;
51811-atomic_t fscache_n_store_calls;
51812-atomic_t fscache_n_store_pages;
51813-atomic_t fscache_n_store_radix_deletes;
51814-atomic_t fscache_n_store_pages_over_limit;
51815+atomic_unchecked_t fscache_n_stores;
51816+atomic_unchecked_t fscache_n_stores_ok;
51817+atomic_unchecked_t fscache_n_stores_again;
51818+atomic_unchecked_t fscache_n_stores_nobufs;
51819+atomic_unchecked_t fscache_n_stores_oom;
51820+atomic_unchecked_t fscache_n_store_ops;
51821+atomic_unchecked_t fscache_n_store_calls;
51822+atomic_unchecked_t fscache_n_store_pages;
51823+atomic_unchecked_t fscache_n_store_radix_deletes;
51824+atomic_unchecked_t fscache_n_store_pages_over_limit;
51825
51826-atomic_t fscache_n_store_vmscan_not_storing;
51827-atomic_t fscache_n_store_vmscan_gone;
51828-atomic_t fscache_n_store_vmscan_busy;
51829-atomic_t fscache_n_store_vmscan_cancelled;
51830-atomic_t fscache_n_store_vmscan_wait;
51831+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
51832+atomic_unchecked_t fscache_n_store_vmscan_gone;
51833+atomic_unchecked_t fscache_n_store_vmscan_busy;
51834+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
51835+atomic_unchecked_t fscache_n_store_vmscan_wait;
51836
51837-atomic_t fscache_n_marks;
51838-atomic_t fscache_n_uncaches;
51839+atomic_unchecked_t fscache_n_marks;
51840+atomic_unchecked_t fscache_n_uncaches;
51841
51842-atomic_t fscache_n_acquires;
51843-atomic_t fscache_n_acquires_null;
51844-atomic_t fscache_n_acquires_no_cache;
51845-atomic_t fscache_n_acquires_ok;
51846-atomic_t fscache_n_acquires_nobufs;
51847-atomic_t fscache_n_acquires_oom;
51848+atomic_unchecked_t fscache_n_acquires;
51849+atomic_unchecked_t fscache_n_acquires_null;
51850+atomic_unchecked_t fscache_n_acquires_no_cache;
51851+atomic_unchecked_t fscache_n_acquires_ok;
51852+atomic_unchecked_t fscache_n_acquires_nobufs;
51853+atomic_unchecked_t fscache_n_acquires_oom;
51854
51855-atomic_t fscache_n_invalidates;
51856-atomic_t fscache_n_invalidates_run;
51857+atomic_unchecked_t fscache_n_invalidates;
51858+atomic_unchecked_t fscache_n_invalidates_run;
51859
51860-atomic_t fscache_n_updates;
51861-atomic_t fscache_n_updates_null;
51862-atomic_t fscache_n_updates_run;
51863+atomic_unchecked_t fscache_n_updates;
51864+atomic_unchecked_t fscache_n_updates_null;
51865+atomic_unchecked_t fscache_n_updates_run;
51866
51867-atomic_t fscache_n_relinquishes;
51868-atomic_t fscache_n_relinquishes_null;
51869-atomic_t fscache_n_relinquishes_waitcrt;
51870-atomic_t fscache_n_relinquishes_retire;
51871+atomic_unchecked_t fscache_n_relinquishes;
51872+atomic_unchecked_t fscache_n_relinquishes_null;
51873+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
51874+atomic_unchecked_t fscache_n_relinquishes_retire;
51875
51876-atomic_t fscache_n_cookie_index;
51877-atomic_t fscache_n_cookie_data;
51878-atomic_t fscache_n_cookie_special;
51879+atomic_unchecked_t fscache_n_cookie_index;
51880+atomic_unchecked_t fscache_n_cookie_data;
51881+atomic_unchecked_t fscache_n_cookie_special;
51882
51883-atomic_t fscache_n_object_alloc;
51884-atomic_t fscache_n_object_no_alloc;
51885-atomic_t fscache_n_object_lookups;
51886-atomic_t fscache_n_object_lookups_negative;
51887-atomic_t fscache_n_object_lookups_positive;
51888-atomic_t fscache_n_object_lookups_timed_out;
51889-atomic_t fscache_n_object_created;
51890-atomic_t fscache_n_object_avail;
51891-atomic_t fscache_n_object_dead;
51892+atomic_unchecked_t fscache_n_object_alloc;
51893+atomic_unchecked_t fscache_n_object_no_alloc;
51894+atomic_unchecked_t fscache_n_object_lookups;
51895+atomic_unchecked_t fscache_n_object_lookups_negative;
51896+atomic_unchecked_t fscache_n_object_lookups_positive;
51897+atomic_unchecked_t fscache_n_object_lookups_timed_out;
51898+atomic_unchecked_t fscache_n_object_created;
51899+atomic_unchecked_t fscache_n_object_avail;
51900+atomic_unchecked_t fscache_n_object_dead;
51901
51902-atomic_t fscache_n_checkaux_none;
51903-atomic_t fscache_n_checkaux_okay;
51904-atomic_t fscache_n_checkaux_update;
51905-atomic_t fscache_n_checkaux_obsolete;
51906+atomic_unchecked_t fscache_n_checkaux_none;
51907+atomic_unchecked_t fscache_n_checkaux_okay;
51908+atomic_unchecked_t fscache_n_checkaux_update;
51909+atomic_unchecked_t fscache_n_checkaux_obsolete;
51910
51911 atomic_t fscache_n_cop_alloc_object;
51912 atomic_t fscache_n_cop_lookup_object;
51913@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
51914 seq_puts(m, "FS-Cache statistics\n");
51915
51916 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
51917- atomic_read(&fscache_n_cookie_index),
51918- atomic_read(&fscache_n_cookie_data),
51919- atomic_read(&fscache_n_cookie_special));
51920+ atomic_read_unchecked(&fscache_n_cookie_index),
51921+ atomic_read_unchecked(&fscache_n_cookie_data),
51922+ atomic_read_unchecked(&fscache_n_cookie_special));
51923
51924 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
51925- atomic_read(&fscache_n_object_alloc),
51926- atomic_read(&fscache_n_object_no_alloc),
51927- atomic_read(&fscache_n_object_avail),
51928- atomic_read(&fscache_n_object_dead));
51929+ atomic_read_unchecked(&fscache_n_object_alloc),
51930+ atomic_read_unchecked(&fscache_n_object_no_alloc),
51931+ atomic_read_unchecked(&fscache_n_object_avail),
51932+ atomic_read_unchecked(&fscache_n_object_dead));
51933 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
51934- atomic_read(&fscache_n_checkaux_none),
51935- atomic_read(&fscache_n_checkaux_okay),
51936- atomic_read(&fscache_n_checkaux_update),
51937- atomic_read(&fscache_n_checkaux_obsolete));
51938+ atomic_read_unchecked(&fscache_n_checkaux_none),
51939+ atomic_read_unchecked(&fscache_n_checkaux_okay),
51940+ atomic_read_unchecked(&fscache_n_checkaux_update),
51941+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
51942
51943 seq_printf(m, "Pages : mrk=%u unc=%u\n",
51944- atomic_read(&fscache_n_marks),
51945- atomic_read(&fscache_n_uncaches));
51946+ atomic_read_unchecked(&fscache_n_marks),
51947+ atomic_read_unchecked(&fscache_n_uncaches));
51948
51949 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
51950 " oom=%u\n",
51951- atomic_read(&fscache_n_acquires),
51952- atomic_read(&fscache_n_acquires_null),
51953- atomic_read(&fscache_n_acquires_no_cache),
51954- atomic_read(&fscache_n_acquires_ok),
51955- atomic_read(&fscache_n_acquires_nobufs),
51956- atomic_read(&fscache_n_acquires_oom));
51957+ atomic_read_unchecked(&fscache_n_acquires),
51958+ atomic_read_unchecked(&fscache_n_acquires_null),
51959+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
51960+ atomic_read_unchecked(&fscache_n_acquires_ok),
51961+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
51962+ atomic_read_unchecked(&fscache_n_acquires_oom));
51963
51964 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
51965- atomic_read(&fscache_n_object_lookups),
51966- atomic_read(&fscache_n_object_lookups_negative),
51967- atomic_read(&fscache_n_object_lookups_positive),
51968- atomic_read(&fscache_n_object_created),
51969- atomic_read(&fscache_n_object_lookups_timed_out));
51970+ atomic_read_unchecked(&fscache_n_object_lookups),
51971+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
51972+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
51973+ atomic_read_unchecked(&fscache_n_object_created),
51974+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
51975
51976 seq_printf(m, "Invals : n=%u run=%u\n",
51977- atomic_read(&fscache_n_invalidates),
51978- atomic_read(&fscache_n_invalidates_run));
51979+ atomic_read_unchecked(&fscache_n_invalidates),
51980+ atomic_read_unchecked(&fscache_n_invalidates_run));
51981
51982 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
51983- atomic_read(&fscache_n_updates),
51984- atomic_read(&fscache_n_updates_null),
51985- atomic_read(&fscache_n_updates_run));
51986+ atomic_read_unchecked(&fscache_n_updates),
51987+ atomic_read_unchecked(&fscache_n_updates_null),
51988+ atomic_read_unchecked(&fscache_n_updates_run));
51989
51990 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
51991- atomic_read(&fscache_n_relinquishes),
51992- atomic_read(&fscache_n_relinquishes_null),
51993- atomic_read(&fscache_n_relinquishes_waitcrt),
51994- atomic_read(&fscache_n_relinquishes_retire));
51995+ atomic_read_unchecked(&fscache_n_relinquishes),
51996+ atomic_read_unchecked(&fscache_n_relinquishes_null),
51997+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
51998+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
51999
52000 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
52001- atomic_read(&fscache_n_attr_changed),
52002- atomic_read(&fscache_n_attr_changed_ok),
52003- atomic_read(&fscache_n_attr_changed_nobufs),
52004- atomic_read(&fscache_n_attr_changed_nomem),
52005- atomic_read(&fscache_n_attr_changed_calls));
52006+ atomic_read_unchecked(&fscache_n_attr_changed),
52007+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
52008+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
52009+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
52010+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
52011
52012 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
52013- atomic_read(&fscache_n_allocs),
52014- atomic_read(&fscache_n_allocs_ok),
52015- atomic_read(&fscache_n_allocs_wait),
52016- atomic_read(&fscache_n_allocs_nobufs),
52017- atomic_read(&fscache_n_allocs_intr));
52018+ atomic_read_unchecked(&fscache_n_allocs),
52019+ atomic_read_unchecked(&fscache_n_allocs_ok),
52020+ atomic_read_unchecked(&fscache_n_allocs_wait),
52021+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
52022+ atomic_read_unchecked(&fscache_n_allocs_intr));
52023 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
52024- atomic_read(&fscache_n_alloc_ops),
52025- atomic_read(&fscache_n_alloc_op_waits),
52026- atomic_read(&fscache_n_allocs_object_dead));
52027+ atomic_read_unchecked(&fscache_n_alloc_ops),
52028+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
52029+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
52030
52031 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
52032 " int=%u oom=%u\n",
52033- atomic_read(&fscache_n_retrievals),
52034- atomic_read(&fscache_n_retrievals_ok),
52035- atomic_read(&fscache_n_retrievals_wait),
52036- atomic_read(&fscache_n_retrievals_nodata),
52037- atomic_read(&fscache_n_retrievals_nobufs),
52038- atomic_read(&fscache_n_retrievals_intr),
52039- atomic_read(&fscache_n_retrievals_nomem));
52040+ atomic_read_unchecked(&fscache_n_retrievals),
52041+ atomic_read_unchecked(&fscache_n_retrievals_ok),
52042+ atomic_read_unchecked(&fscache_n_retrievals_wait),
52043+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
52044+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
52045+ atomic_read_unchecked(&fscache_n_retrievals_intr),
52046+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
52047 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
52048- atomic_read(&fscache_n_retrieval_ops),
52049- atomic_read(&fscache_n_retrieval_op_waits),
52050- atomic_read(&fscache_n_retrievals_object_dead));
52051+ atomic_read_unchecked(&fscache_n_retrieval_ops),
52052+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
52053+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
52054
52055 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
52056- atomic_read(&fscache_n_stores),
52057- atomic_read(&fscache_n_stores_ok),
52058- atomic_read(&fscache_n_stores_again),
52059- atomic_read(&fscache_n_stores_nobufs),
52060- atomic_read(&fscache_n_stores_oom));
52061+ atomic_read_unchecked(&fscache_n_stores),
52062+ atomic_read_unchecked(&fscache_n_stores_ok),
52063+ atomic_read_unchecked(&fscache_n_stores_again),
52064+ atomic_read_unchecked(&fscache_n_stores_nobufs),
52065+ atomic_read_unchecked(&fscache_n_stores_oom));
52066 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
52067- atomic_read(&fscache_n_store_ops),
52068- atomic_read(&fscache_n_store_calls),
52069- atomic_read(&fscache_n_store_pages),
52070- atomic_read(&fscache_n_store_radix_deletes),
52071- atomic_read(&fscache_n_store_pages_over_limit));
52072+ atomic_read_unchecked(&fscache_n_store_ops),
52073+ atomic_read_unchecked(&fscache_n_store_calls),
52074+ atomic_read_unchecked(&fscache_n_store_pages),
52075+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
52076+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
52077
52078 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
52079- atomic_read(&fscache_n_store_vmscan_not_storing),
52080- atomic_read(&fscache_n_store_vmscan_gone),
52081- atomic_read(&fscache_n_store_vmscan_busy),
52082- atomic_read(&fscache_n_store_vmscan_cancelled),
52083- atomic_read(&fscache_n_store_vmscan_wait));
52084+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
52085+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
52086+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
52087+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
52088+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
52089
52090 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
52091- atomic_read(&fscache_n_op_pend),
52092- atomic_read(&fscache_n_op_run),
52093- atomic_read(&fscache_n_op_enqueue),
52094- atomic_read(&fscache_n_op_cancelled),
52095- atomic_read(&fscache_n_op_rejected));
52096+ atomic_read_unchecked(&fscache_n_op_pend),
52097+ atomic_read_unchecked(&fscache_n_op_run),
52098+ atomic_read_unchecked(&fscache_n_op_enqueue),
52099+ atomic_read_unchecked(&fscache_n_op_cancelled),
52100+ atomic_read_unchecked(&fscache_n_op_rejected));
52101 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
52102- atomic_read(&fscache_n_op_deferred_release),
52103- atomic_read(&fscache_n_op_release),
52104- atomic_read(&fscache_n_op_gc));
52105+ atomic_read_unchecked(&fscache_n_op_deferred_release),
52106+ atomic_read_unchecked(&fscache_n_op_release),
52107+ atomic_read_unchecked(&fscache_n_op_gc));
52108
52109 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
52110 atomic_read(&fscache_n_cop_alloc_object),
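The fscache statistics conversion above is one instance of a pattern that recurs throughout this patch (fs/inode.c, fs/lockd, fs/notify, fs/nfs and fs/ocfs2 below all get the same treatment): counters whose occasional wraparound is harmless are moved from atomic_t to atomic_unchecked_t, so that PaX REFCOUNT overflow detection, which guards real reference counts, does not trip on pure statistics and cookie generators. A minimal sketch of the disabled-case mapping, assuming CONFIG_PAX_REFCOUNT is off (the patch's actual definitions live in the atomic headers, not shown in this section):

/* Sketch, not the patch's verbatim definitions: with REFCOUNT off,
 * the unchecked API collapses onto the ordinary atomic API. */
typedef atomic_t atomic_unchecked_t;
typedef atomic_long_t atomic_long_unchecked_t;

#define atomic_read_unchecked(v)            atomic_read(v)
#define atomic_set_unchecked(v, i)          atomic_set((v), (i))
#define atomic_inc_unchecked(v)             atomic_inc(v)
#define atomic_inc_return_unchecked(v)      atomic_inc_return(v)
#define atomic_add_return_unchecked(i, v)   atomic_add_return((i), (v))
#define atomic_long_read_unchecked(v)       atomic_long_read(v)
#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)

With REFCOUNT enabled, only the plain atomic_t operations carry the overflow checks; the _unchecked variants keep the original unchecked semantics.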
52111diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
52112index e397b67..b0d8709 100644
52113--- a/fs/fuse/cuse.c
52114+++ b/fs/fuse/cuse.c
52115@@ -593,10 +593,12 @@ static int __init cuse_init(void)
52116 INIT_LIST_HEAD(&cuse_conntbl[i]);
52117
52118 /* inherit and extend fuse_dev_operations */
52119- cuse_channel_fops = fuse_dev_operations;
52120- cuse_channel_fops.owner = THIS_MODULE;
52121- cuse_channel_fops.open = cuse_channel_open;
52122- cuse_channel_fops.release = cuse_channel_release;
52123+ pax_open_kernel();
52124+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
52125+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
52126+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
52127+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
52128+ pax_close_kernel();
52129
52130 cuse_class = class_create(THIS_MODULE, "cuse");
52131 if (IS_ERR(cuse_class))
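Rather than assigning to cuse_channel_fops field by field, the patched cuse_init() treats the file_operations instance as constified data and briefly lifts kernel write protection around its one-time setup; the same pax_open_kernel()/pax_close_kernel() bracketing reappears in the fs/nls changes further down. A rough sketch of the mechanism on x86, assuming the usual CR0.WP toggle (not the patch's exact implementation, which also has to deal with preemption):

/* Sketch: briefly permit kernel writes to read-only pages. */
static inline unsigned long pax_open_kernel(void)
{
        unsigned long cr0 = read_cr0();

        write_cr0(cr0 & ~X86_CR0_WP);   /* stores to RO pages now succeed */
        barrier();
        return 0;
}

static inline unsigned long pax_close_kernel(void)
{
        barrier();
        write_cr0(read_cr0() | X86_CR0_WP); /* re-arm write protection */
        return 0;
}

The memcpy() and pointer-cast style of the replacement code exists to defeat the compiler's assumption that a const-qualified object is never written.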
52132diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
52133index e83351a..41e3c9c 100644
52134--- a/fs/fuse/dev.c
52135+++ b/fs/fuse/dev.c
52136@@ -1236,7 +1236,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
52137 ret = 0;
52138 pipe_lock(pipe);
52139
52140- if (!pipe->readers) {
52141+ if (!atomic_read(&pipe->readers)) {
52142 send_sig(SIGPIPE, current, 0);
52143 if (!ret)
52144 ret = -EPIPE;
52145diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
52146index 315e1f8..91f890c 100644
52147--- a/fs/fuse/dir.c
52148+++ b/fs/fuse/dir.c
52149@@ -1233,7 +1233,7 @@ static char *read_link(struct dentry *dentry)
52150 return link;
52151 }
52152
52153-static void free_link(char *link)
52154+static void free_link(const char *link)
52155 {
52156 if (!IS_ERR(link))
52157 free_page((unsigned long) link);
52158diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
52159index 2b6f569..fcb4d1f 100644
52160--- a/fs/gfs2/inode.c
52161+++ b/fs/gfs2/inode.c
52162@@ -1499,7 +1499,7 @@ out:
52163
52164 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
52165 {
52166- char *s = nd_get_link(nd);
52167+ const char *s = nd_get_link(nd);
52168 if (!IS_ERR(s))
52169 kfree(s);
52170 }
52171diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
52172index 78bde32..767e906 100644
52173--- a/fs/hugetlbfs/inode.c
52174+++ b/fs/hugetlbfs/inode.c
52175@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
52176 struct mm_struct *mm = current->mm;
52177 struct vm_area_struct *vma;
52178 struct hstate *h = hstate_file(file);
52179+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
52180 struct vm_unmapped_area_info info;
52181
52182 if (len & ~huge_page_mask(h))
52183@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
52184 return addr;
52185 }
52186
52187+#ifdef CONFIG_PAX_RANDMMAP
52188+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
52189+#endif
52190+
52191 if (addr) {
52192 addr = ALIGN(addr, huge_page_size(h));
52193 vma = find_vma(mm, addr);
52194- if (TASK_SIZE - len >= addr &&
52195- (!vma || addr + len <= vma->vm_start))
52196+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
52197 return addr;
52198 }
52199
52200 info.flags = 0;
52201 info.length = len;
52202 info.low_limit = TASK_UNMAPPED_BASE;
52203+
52204+#ifdef CONFIG_PAX_RANDMMAP
52205+ if (mm->pax_flags & MF_PAX_RANDMMAP)
52206+ info.low_limit += mm->delta_mmap;
52207+#endif
52208+
52209 info.high_limit = TASK_SIZE;
52210 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
52211 info.align_offset = 0;
52212@@ -897,7 +907,7 @@ static struct file_system_type hugetlbfs_fs_type = {
52213 .kill_sb = kill_litter_super,
52214 };
52215
52216-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
52217+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
52218
52219 static int can_do_hugetlb_shm(void)
52220 {
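The hugetlbfs hunk wires huge-page mappings into the patch's mmap randomization: address hints from userland are ignored while MF_PAX_RANDMMAP is set, the bottom-up search base is shifted by the per-exec delta_mmap offset, and check_heap_stack_gap() (with the gr_rand_threadstack_offset() slack) keeps new mappings clear of the stack. A userspace illustration of the visible effect, which holds for any mmap ASLR scheme, not just this implementation:

/* With mmap randomization active, the placement of an anonymous
 * mapping differs from run to run. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        void *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        printf("mapping at %p\n", p);   /* varies across executions */
        return munmap(p, 2 * 1024 * 1024);
}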
52221diff --git a/fs/inode.c b/fs/inode.c
52222index 14084b7..29af1d9 100644
52223--- a/fs/inode.c
52224+++ b/fs/inode.c
52225@@ -880,8 +880,8 @@ unsigned int get_next_ino(void)
52226
52227 #ifdef CONFIG_SMP
52228 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
52229- static atomic_t shared_last_ino;
52230- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
52231+ static atomic_unchecked_t shared_last_ino;
52232+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
52233
52234 res = next - LAST_INO_BATCH;
52235 }
52236diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
52237index 4a6cf28..d3a29d3 100644
52238--- a/fs/jffs2/erase.c
52239+++ b/fs/jffs2/erase.c
52240@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
52241 struct jffs2_unknown_node marker = {
52242 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
52243 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
52244- .totlen = cpu_to_je32(c->cleanmarker_size)
52245+ .totlen = cpu_to_je32(c->cleanmarker_size),
52246+ .hdr_crc = cpu_to_je32(0)
52247 };
52248
52249 jffs2_prealloc_raw_node_refs(c, jeb, 1);
52250diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
52251index a6597d6..41b30ec 100644
52252--- a/fs/jffs2/wbuf.c
52253+++ b/fs/jffs2/wbuf.c
52254@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
52255 {
52256 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
52257 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
52258- .totlen = constant_cpu_to_je32(8)
52259+ .totlen = constant_cpu_to_je32(8),
52260+ .hdr_crc = constant_cpu_to_je32(0)
52261 };
52262
52263 /*
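The two jffs2 hunks add an explicit .hdr_crc = cpu_to_je32(0) to the cleanmarker initializers. C99 (6.7.8p21) already guarantees that members omitted from a brace-enclosed initializer are zero-initialized, so the extra member does not change the emitted node; it only makes the zero header CRC explicit in the source. A minimal demonstration of the rule:

/* Both objects are identical: omitted members are zeroed. */
struct node { int magic; int totlen; int hdr_crc; };

struct node a = { .magic = 1, .totlen = 8 };               /* hdr_crc == 0 */
struct node b = { .magic = 1, .totlen = 8, .hdr_crc = 0 }; /* same bytes  */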
52264diff --git a/fs/jfs/super.c b/fs/jfs/super.c
52265index 1a543be..a4e1363 100644
52266--- a/fs/jfs/super.c
52267+++ b/fs/jfs/super.c
52268@@ -225,7 +225,7 @@ static const match_table_t tokens = {
52269 static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
52270 int *flag)
52271 {
52272- void *nls_map = (void *)-1; /* -1: no change; NULL: none */
52273+ const void *nls_map = (const void *)-1; /* -1: no change; NULL: none */
52274 char *p;
52275 struct jfs_sb_info *sbi = JFS_SBI(sb);
52276
52277@@ -253,7 +253,7 @@ static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
52278 /* Don't do anything ;-) */
52279 break;
52280 case Opt_iocharset:
52281- if (nls_map && nls_map != (void *) -1)
52282+ if (nls_map && nls_map != (const void *) -1)
52283 unload_nls(nls_map);
52284 if (!strcmp(args[0].from, "none"))
52285 nls_map = NULL;
52286@@ -855,7 +855,7 @@ static int __init init_jfs_fs(void)
52287
52288 jfs_inode_cachep =
52289 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
52290- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
52291+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
52292 init_once);
52293 if (jfs_inode_cachep == NULL)
52294 return -ENOMEM;
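The SLAB_USERCOPY flag added to the jfs_ip cache is PAX_USERCOPY whitelisting: with that feature enabled, copy_to_user()/copy_from_user() involving slab memory is refused unless the object's cache carries the flag, so any cache whose objects are partially exposed to userland has to opt in. A sketch of the check, assuming a helper of roughly this shape in the slab allocator (the names are illustrative, not the patch's exact code):

/* Sketch: refuse user copies from caches not whitelisted for them. */
static const char *check_slab_usercopy(struct kmem_cache *cachep,
                                       const void *ptr, unsigned long n)
{
        if (!(cachep->flags & SLAB_USERCOPY))
                return cachep->name;    /* violation: report the cache */
        return NULL;                    /* whitelisted: copy may proceed */
}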
52295diff --git a/fs/libfs.c b/fs/libfs.c
52296index 916da8c..1588998 100644
52297--- a/fs/libfs.c
52298+++ b/fs/libfs.c
52299@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
52300
52301 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
52302 struct dentry *next;
52303+ char d_name[sizeof(next->d_iname)];
52304+ const unsigned char *name;
52305+
52306 next = list_entry(p, struct dentry, d_u.d_child);
52307 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
52308 if (!simple_positive(next)) {
52309@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
52310
52311 spin_unlock(&next->d_lock);
52312 spin_unlock(&dentry->d_lock);
52313- if (filldir(dirent, next->d_name.name,
52314+ name = next->d_name.name;
52315+ if (name == next->d_iname) {
52316+ memcpy(d_name, name, next->d_name.len);
52317+ name = d_name;
52318+ }
52319+ if (filldir(dirent, name,
52320 next->d_name.len, filp->f_pos,
52321 next->d_inode->i_ino,
52322 dt_type(next->d_inode)) < 0)
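The dcache_readdir() change snapshots names stored inline in the dentry (d_name.name == d_iname) into a stack buffer before calling filldir(). filldir() copies to userspace and may block, and with the dentry locks already dropped a concurrent rename can rewrite the inline name in place, so the directory listing would otherwise read bytes that are mutating under it. The pattern in isolation:

/* Sketch: stabilize a possibly-inline dentry name before a blocking
 * copy to userspace. */
char buf[DNAME_INLINE_LEN];
const unsigned char *name = dentry->d_name.name;

if (name == dentry->d_iname) {                 /* short name, stored inline */
        memcpy(buf, name, dentry->d_name.len);
        name = buf;                            /* stable across a rename */
}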
52323diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
52324index 52e5120..808936e 100644
52325--- a/fs/lockd/clntproc.c
52326+++ b/fs/lockd/clntproc.c
52327@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
52328 /*
52329 * Cookie counter for NLM requests
52330 */
52331-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
52332+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
52333
52334 void nlmclnt_next_cookie(struct nlm_cookie *c)
52335 {
52336- u32 cookie = atomic_inc_return(&nlm_cookie);
52337+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
52338
52339 memcpy(c->data, &cookie, 4);
52340 c->len=4;
52341diff --git a/fs/locks.c b/fs/locks.c
52342index a94e331..060bce3 100644
52343--- a/fs/locks.c
52344+++ b/fs/locks.c
52345@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
52346 return;
52347
52348 if (filp->f_op && filp->f_op->flock) {
52349- struct file_lock fl = {
52350+ struct file_lock flock = {
52351 .fl_pid = current->tgid,
52352 .fl_file = filp,
52353 .fl_flags = FL_FLOCK,
52354 .fl_type = F_UNLCK,
52355 .fl_end = OFFSET_MAX,
52356 };
52357- filp->f_op->flock(filp, F_SETLKW, &fl);
52358- if (fl.fl_ops && fl.fl_ops->fl_release_private)
52359- fl.fl_ops->fl_release_private(&fl);
52360+ filp->f_op->flock(filp, F_SETLKW, &flock);
52361+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
52362+ flock.fl_ops->fl_release_private(&flock);
52363 }
52364
52365 lock_flocks();
52366diff --git a/fs/namei.c b/fs/namei.c
52367index 43a97ee..4e585fd 100644
52368--- a/fs/namei.c
52369+++ b/fs/namei.c
52370@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
52371 if (ret != -EACCES)
52372 return ret;
52373
52374+#ifdef CONFIG_GRKERNSEC
52375+ /* we'll block if we have to log due to a denied capability use */
52376+ if (mask & MAY_NOT_BLOCK)
52377+ return -ECHILD;
52378+#endif
52379+
52380 if (S_ISDIR(inode->i_mode)) {
52381 /* DACs are overridable for directories */
52382- if (inode_capable(inode, CAP_DAC_OVERRIDE))
52383- return 0;
52384 if (!(mask & MAY_WRITE))
52385- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
52386+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
52387+ inode_capable(inode, CAP_DAC_READ_SEARCH))
52388 return 0;
52389+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
52390+ return 0;
52391 return -EACCES;
52392 }
52393 /*
52394+ * Searching includes executable on directories, else just read.
52395+ */
52396+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
52397+ if (mask == MAY_READ)
52398+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
52399+ inode_capable(inode, CAP_DAC_READ_SEARCH))
52400+ return 0;
52401+
52402+ /*
52403 * Read/write DACs are always overridable.
52404 * Executable DACs are overridable when there is
52405 * at least one exec bit set.
52406@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
52407 if (inode_capable(inode, CAP_DAC_OVERRIDE))
52408 return 0;
52409
52410- /*
52411- * Searching includes executable on directories, else just read.
52412- */
52413- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
52414- if (mask == MAY_READ)
52415- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
52416- return 0;
52417-
52418 return -EACCES;
52419 }
52420
52421@@ -826,7 +834,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
52422 {
52423 struct dentry *dentry = link->dentry;
52424 int error;
52425- char *s;
52426+ const char *s;
52427
52428 BUG_ON(nd->flags & LOOKUP_RCU);
52429
52430@@ -847,6 +855,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
52431 if (error)
52432 goto out_put_nd_path;
52433
52434+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
52435+ dentry->d_inode, dentry, nd->path.mnt)) {
52436+ error = -EACCES;
52437+ goto out_put_nd_path;
52438+ }
52439+
52440 nd->last_type = LAST_BIND;
52441 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
52442 error = PTR_ERR(*p);
52443@@ -1596,6 +1610,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
52444 break;
52445 res = walk_component(nd, path, &nd->last,
52446 nd->last_type, LOOKUP_FOLLOW);
52447+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
52448+ res = -EACCES;
52449 put_link(nd, &link, cookie);
52450 } while (res > 0);
52451
52452@@ -1694,7 +1710,7 @@ EXPORT_SYMBOL(full_name_hash);
52453 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
52454 {
52455 unsigned long a, b, adata, bdata, mask, hash, len;
52456- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
52457+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
52458
52459 hash = a = 0;
52460 len = -sizeof(unsigned long);
52461@@ -1979,6 +1995,8 @@ static int path_lookupat(int dfd, const char *name,
52462 if (err)
52463 break;
52464 err = lookup_last(nd, &path);
52465+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
52466+ err = -EACCES;
52467 put_link(nd, &link, cookie);
52468 }
52469 }
52470@@ -1986,6 +2004,19 @@ static int path_lookupat(int dfd, const char *name,
52471 if (!err)
52472 err = complete_walk(nd);
52473
52474+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
52475+#ifdef CONFIG_GRKERNSEC
52476+ if (flags & LOOKUP_RCU) {
52477+ path_put(&nd->path);
52478+ err = -ECHILD;
52479+ } else
52480+#endif
52481+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
52482+ path_put(&nd->path);
52483+ err = -ENOENT;
52484+ }
52485+ }
52486+
52487 if (!err && nd->flags & LOOKUP_DIRECTORY) {
52488 if (!nd->inode->i_op->lookup) {
52489 path_put(&nd->path);
52490@@ -2013,8 +2044,17 @@ static int filename_lookup(int dfd, struct filename *name,
52491 retval = path_lookupat(dfd, name->name,
52492 flags | LOOKUP_REVAL, nd);
52493
52494- if (likely(!retval))
52495+ if (likely(!retval)) {
52496+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
52497+#ifdef CONFIG_GRKERNSEC
52498+ if (flags & LOOKUP_RCU)
52499+ return -ECHILD;
52500+#endif
52501+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
52502+ return -ENOENT;
52503+ }
52504 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
52505+ }
52506 return retval;
52507 }
52508
52509@@ -2392,6 +2432,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
52510 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
52511 return -EPERM;
52512
52513+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
52514+ return -EPERM;
52515+ if (gr_handle_rawio(inode))
52516+ return -EPERM;
52517+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
52518+ return -EACCES;
52519+
52520 return 0;
52521 }
52522
52523@@ -2613,7 +2660,7 @@ looked_up:
52524 * cleared otherwise prior to returning.
52525 */
52526 static int lookup_open(struct nameidata *nd, struct path *path,
52527- struct file *file,
52528+ struct path *link, struct file *file,
52529 const struct open_flags *op,
52530 bool got_write, int *opened)
52531 {
52532@@ -2648,6 +2695,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
52533 /* Negative dentry, just create the file */
52534 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
52535 umode_t mode = op->mode;
52536+
52537+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
52538+ error = -EACCES;
52539+ goto out_dput;
52540+ }
52541+
52542+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
52543+ error = -EACCES;
52544+ goto out_dput;
52545+ }
52546+
52547 if (!IS_POSIXACL(dir->d_inode))
52548 mode &= ~current_umask();
52549 /*
52550@@ -2669,6 +2727,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
52551 nd->flags & LOOKUP_EXCL);
52552 if (error)
52553 goto out_dput;
52554+ else
52555+ gr_handle_create(dentry, nd->path.mnt);
52556 }
52557 out_no_open:
52558 path->dentry = dentry;
52559@@ -2683,7 +2743,7 @@ out_dput:
52560 /*
52561 * Handle the last step of open()
52562 */
52563-static int do_last(struct nameidata *nd, struct path *path,
52564+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
52565 struct file *file, const struct open_flags *op,
52566 int *opened, struct filename *name)
52567 {
52568@@ -2712,16 +2772,44 @@ static int do_last(struct nameidata *nd, struct path *path,
52569 error = complete_walk(nd);
52570 if (error)
52571 return error;
52572+#ifdef CONFIG_GRKERNSEC
52573+ if (nd->flags & LOOKUP_RCU) {
52574+ error = -ECHILD;
52575+ goto out;
52576+ }
52577+#endif
52578+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
52579+ error = -ENOENT;
52580+ goto out;
52581+ }
52582 audit_inode(name, nd->path.dentry, 0);
52583 if (open_flag & O_CREAT) {
52584 error = -EISDIR;
52585 goto out;
52586 }
52587+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
52588+ error = -EACCES;
52589+ goto out;
52590+ }
52591 goto finish_open;
52592 case LAST_BIND:
52593 error = complete_walk(nd);
52594 if (error)
52595 return error;
52596+#ifdef CONFIG_GRKERNSEC
52597+ if (nd->flags & LOOKUP_RCU) {
52598+ error = -ECHILD;
52599+ goto out;
52600+ }
52601+#endif
52602+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
52603+ error = -ENOENT;
52604+ goto out;
52605+ }
52606+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
52607+ error = -EACCES;
52608+ goto out;
52609+ }
52610 audit_inode(name, dir, 0);
52611 goto finish_open;
52612 }
52613@@ -2770,7 +2858,7 @@ retry_lookup:
52614 */
52615 }
52616 mutex_lock(&dir->d_inode->i_mutex);
52617- error = lookup_open(nd, path, file, op, got_write, opened);
52618+ error = lookup_open(nd, path, link, file, op, got_write, opened);
52619 mutex_unlock(&dir->d_inode->i_mutex);
52620
52621 if (error <= 0) {
52622@@ -2794,11 +2882,28 @@ retry_lookup:
52623 goto finish_open_created;
52624 }
52625
52626+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
52627+ error = -ENOENT;
52628+ goto exit_dput;
52629+ }
52630+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
52631+ error = -EACCES;
52632+ goto exit_dput;
52633+ }
52634+
52635 /*
52636 * create/update audit record if it already exists.
52637 */
52638- if (path->dentry->d_inode)
52639+ if (path->dentry->d_inode) {
52640+ /* only check if O_CREAT is specified, all other checks need to go
52641+ into may_open */
52642+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
52643+ error = -EACCES;
52644+ goto exit_dput;
52645+ }
52646+
52647 audit_inode(name, path->dentry, 0);
52648+ }
52649
52650 /*
52651 * If atomic_open() acquired write access it is dropped now due to
52652@@ -2839,6 +2944,11 @@ finish_lookup:
52653 }
52654 }
52655 BUG_ON(inode != path->dentry->d_inode);
52656+ /* if we're resolving a symlink to another symlink */
52657+ if (link && gr_handle_symlink_owner(link, inode)) {
52658+ error = -EACCES;
52659+ goto out;
52660+ }
52661 return 1;
52662 }
52663
52664@@ -2848,7 +2958,6 @@ finish_lookup:
52665 save_parent.dentry = nd->path.dentry;
52666 save_parent.mnt = mntget(path->mnt);
52667 nd->path.dentry = path->dentry;
52668-
52669 }
52670 nd->inode = inode;
52671 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
52672@@ -2857,6 +2966,22 @@ finish_lookup:
52673 path_put(&save_parent);
52674 return error;
52675 }
52676+
52677+#ifdef CONFIG_GRKERNSEC
52678+ if (nd->flags & LOOKUP_RCU) {
52679+ error = -ECHILD;
52680+ goto out;
52681+ }
52682+#endif
52683+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
52684+ error = -ENOENT;
52685+ goto out;
52686+ }
52687+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
52688+ error = -EACCES;
52689+ goto out;
52690+ }
52691+
52692 error = -EISDIR;
52693 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
52694 goto out;
52695@@ -2955,7 +3080,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
52696 if (unlikely(error))
52697 goto out;
52698
52699- error = do_last(nd, &path, file, op, &opened, pathname);
52700+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
52701 while (unlikely(error > 0)) { /* trailing symlink */
52702 struct path link = path;
52703 void *cookie;
52704@@ -2973,7 +3098,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
52705 error = follow_link(&link, nd, &cookie);
52706 if (unlikely(error))
52707 break;
52708- error = do_last(nd, &path, file, op, &opened, pathname);
52709+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
52710 put_link(nd, &link, cookie);
52711 }
52712 out:
52713@@ -3073,8 +3198,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
52714 goto unlock;
52715
52716 error = -EEXIST;
52717- if (dentry->d_inode)
52718+ if (dentry->d_inode) {
52719+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
52720+ error = -ENOENT;
52721+ }
52722 goto fail;
52723+ }
52724 /*
52725 * Special case - lookup gave negative, but... we had foo/bar/
52726 * From the vfs_mknod() POV we just have a negative dentry -
52727@@ -3126,6 +3255,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
52728 }
52729 EXPORT_SYMBOL(user_path_create);
52730
52731+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
52732+{
52733+ struct filename *tmp = getname(pathname);
52734+ struct dentry *res;
52735+ if (IS_ERR(tmp))
52736+ return ERR_CAST(tmp);
52737+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
52738+ if (IS_ERR(res))
52739+ putname(tmp);
52740+ else
52741+ *to = tmp;
52742+ return res;
52743+}
52744+
52745 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
52746 {
52747 int error = may_create(dir, dentry);
52748@@ -3188,6 +3331,17 @@ retry:
52749
52750 if (!IS_POSIXACL(path.dentry->d_inode))
52751 mode &= ~current_umask();
52752+
52753+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
52754+ error = -EPERM;
52755+ goto out;
52756+ }
52757+
52758+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
52759+ error = -EACCES;
52760+ goto out;
52761+ }
52762+
52763 error = security_path_mknod(&path, dentry, mode, dev);
52764 if (error)
52765 goto out;
52766@@ -3204,6 +3358,8 @@ retry:
52767 break;
52768 }
52769 out:
52770+ if (!error)
52771+ gr_handle_create(dentry, path.mnt);
52772 done_path_create(&path, dentry);
52773 if (retry_estale(error, lookup_flags)) {
52774 lookup_flags |= LOOKUP_REVAL;
52775@@ -3256,9 +3412,16 @@ retry:
52776
52777 if (!IS_POSIXACL(path.dentry->d_inode))
52778 mode &= ~current_umask();
52779+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
52780+ error = -EACCES;
52781+ goto out;
52782+ }
52783 error = security_path_mkdir(&path, dentry, mode);
52784 if (!error)
52785 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
52786+ if (!error)
52787+ gr_handle_create(dentry, path.mnt);
52788+out:
52789 done_path_create(&path, dentry);
52790 if (retry_estale(error, lookup_flags)) {
52791 lookup_flags |= LOOKUP_REVAL;
52792@@ -3339,6 +3502,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
52793 struct filename *name;
52794 struct dentry *dentry;
52795 struct nameidata nd;
52796+ ino_t saved_ino = 0;
52797+ dev_t saved_dev = 0;
52798 unsigned int lookup_flags = 0;
52799 retry:
52800 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
52801@@ -3371,10 +3536,21 @@ retry:
52802 error = -ENOENT;
52803 goto exit3;
52804 }
52805+
52806+ saved_ino = dentry->d_inode->i_ino;
52807+ saved_dev = gr_get_dev_from_dentry(dentry);
52808+
52809+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
52810+ error = -EACCES;
52811+ goto exit3;
52812+ }
52813+
52814 error = security_path_rmdir(&nd.path, dentry);
52815 if (error)
52816 goto exit3;
52817 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
52818+ if (!error && (saved_dev || saved_ino))
52819+ gr_handle_delete(saved_ino, saved_dev);
52820 exit3:
52821 dput(dentry);
52822 exit2:
52823@@ -3440,6 +3616,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
52824 struct dentry *dentry;
52825 struct nameidata nd;
52826 struct inode *inode = NULL;
52827+ ino_t saved_ino = 0;
52828+ dev_t saved_dev = 0;
52829 unsigned int lookup_flags = 0;
52830 retry:
52831 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
52832@@ -3466,10 +3644,22 @@ retry:
52833 if (!inode)
52834 goto slashes;
52835 ihold(inode);
52836+
52837+ if (inode->i_nlink <= 1) {
52838+ saved_ino = inode->i_ino;
52839+ saved_dev = gr_get_dev_from_dentry(dentry);
52840+ }
52841+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
52842+ error = -EACCES;
52843+ goto exit2;
52844+ }
52845+
52846 error = security_path_unlink(&nd.path, dentry);
52847 if (error)
52848 goto exit2;
52849 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
52850+ if (!error && (saved_ino || saved_dev))
52851+ gr_handle_delete(saved_ino, saved_dev);
52852 exit2:
52853 dput(dentry);
52854 }
52855@@ -3547,9 +3737,17 @@ retry:
52856 if (IS_ERR(dentry))
52857 goto out_putname;
52858
52859+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
52860+ error = -EACCES;
52861+ goto out;
52862+ }
52863+
52864 error = security_path_symlink(&path, dentry, from->name);
52865 if (!error)
52866 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
52867+ if (!error)
52868+ gr_handle_create(dentry, path.mnt);
52869+out:
52870 done_path_create(&path, dentry);
52871 if (retry_estale(error, lookup_flags)) {
52872 lookup_flags |= LOOKUP_REVAL;
52873@@ -3623,6 +3821,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
52874 {
52875 struct dentry *new_dentry;
52876 struct path old_path, new_path;
52877+ struct filename *to = NULL;
52878 int how = 0;
52879 int error;
52880
52881@@ -3646,7 +3845,7 @@ retry:
52882 if (error)
52883 return error;
52884
52885- new_dentry = user_path_create(newdfd, newname, &new_path,
52886+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
52887 (how & LOOKUP_REVAL));
52888 error = PTR_ERR(new_dentry);
52889 if (IS_ERR(new_dentry))
52890@@ -3658,11 +3857,28 @@ retry:
52891 error = may_linkat(&old_path);
52892 if (unlikely(error))
52893 goto out_dput;
52894+
52895+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
52896+ old_path.dentry->d_inode,
52897+ old_path.dentry->d_inode->i_mode, to)) {
52898+ error = -EACCES;
52899+ goto out_dput;
52900+ }
52901+
52902+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
52903+ old_path.dentry, old_path.mnt, to)) {
52904+ error = -EACCES;
52905+ goto out_dput;
52906+ }
52907+
52908 error = security_path_link(old_path.dentry, &new_path, new_dentry);
52909 if (error)
52910 goto out_dput;
52911 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
52912+ if (!error)
52913+ gr_handle_create(new_dentry, new_path.mnt);
52914 out_dput:
52915+ putname(to);
52916 done_path_create(&new_path, new_dentry);
52917 if (retry_estale(error, how)) {
52918 how |= LOOKUP_REVAL;
52919@@ -3908,12 +4124,21 @@ retry:
52920 if (new_dentry == trap)
52921 goto exit5;
52922
52923+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
52924+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
52925+ to);
52926+ if (error)
52927+ goto exit5;
52928+
52929 error = security_path_rename(&oldnd.path, old_dentry,
52930 &newnd.path, new_dentry);
52931 if (error)
52932 goto exit5;
52933 error = vfs_rename(old_dir->d_inode, old_dentry,
52934 new_dir->d_inode, new_dentry);
52935+ if (!error)
52936+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
52937+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
52938 exit5:
52939 dput(new_dentry);
52940 exit4:
52941@@ -3945,6 +4170,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
52942
52943 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
52944 {
52945+ char tmpbuf[64];
52946+ const char *newlink;
52947 int len;
52948
52949 len = PTR_ERR(link);
52950@@ -3954,7 +4181,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
52951 len = strlen(link);
52952 if (len > (unsigned) buflen)
52953 len = buflen;
52954- if (copy_to_user(buffer, link, len))
52955+
52956+ if (len < sizeof(tmpbuf)) {
52957+ memcpy(tmpbuf, link, len);
52958+ newlink = tmpbuf;
52959+ } else
52960+ newlink = link;
52961+
52962+ if (copy_to_user(buffer, newlink, len))
52963 len = -EFAULT;
52964 out:
52965 return len;
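All of the fs/namei.c additions follow one convention: each gr_acl_handle_*() / gr_handle_*() call is an RBAC or hardening hook placed beside the corresponding DAC/LSM check, vetoing the operation with -EACCES, -ENOENT or -EPERM, and the RCU-walk paths bail out to -ECHILD first because the hooks may need to sleep in order to log. The generic_permission() reshuffle serves the same logging machinery: CAP_DAC_OVERRIDE is probed with the _nolog variant before CAP_DAC_READ_SEARCH, so a read-only directory access gets attributed to the narrower capability in the audit trail. The vfs_readlink() hunk, finally, bounces short link bodies through a 64-byte stack buffer before copy_to_user(), presumably so the USERCOPY checks need not whitelist every slab the link text may live in. With CONFIG_GRKERNSEC disabled the hooks compile to constant stubs; a representative pair, assuming the usual disabled-case shape rather than quoting the patch:

/* Sketch of the disabled case: every hook degenerates to "allow". */
static inline int gr_handle_follow_link(const struct inode *parent,
                                        const struct inode *inode,
                                        const struct dentry *dentry,
                                        const struct vfsmount *mnt)
{
        return 0;       /* zero means "do not veto" in the callers above */
}

static inline int gr_acl_handle_hidden_file(const struct dentry *dentry,
                                            const struct vfsmount *mnt)
{
        return 1;       /* non-zero means "visible" in the callers above */
}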
52966diff --git a/fs/namespace.c b/fs/namespace.c
52967index a51054f..f9b53e5 100644
52968--- a/fs/namespace.c
52969+++ b/fs/namespace.c
52970@@ -1215,6 +1215,9 @@ static int do_umount(struct mount *mnt, int flags)
52971 if (!(sb->s_flags & MS_RDONLY))
52972 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
52973 up_write(&sb->s_umount);
52974+
52975+ gr_log_remount(mnt->mnt_devname, retval);
52976+
52977 return retval;
52978 }
52979
52980@@ -1234,6 +1237,9 @@ static int do_umount(struct mount *mnt, int flags)
52981 br_write_unlock(&vfsmount_lock);
52982 up_write(&namespace_sem);
52983 release_mounts(&umount_list);
52984+
52985+ gr_log_unmount(mnt->mnt_devname, retval);
52986+
52987 return retval;
52988 }
52989
52990@@ -2287,6 +2293,16 @@ long do_mount(const char *dev_name, const char *dir_name,
52991 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
52992 MS_STRICTATIME);
52993
52994+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
52995+ retval = -EPERM;
52996+ goto dput_out;
52997+ }
52998+
52999+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
53000+ retval = -EPERM;
53001+ goto dput_out;
53002+ }
53003+
53004 if (flags & MS_REMOUNT)
53005 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
53006 data_page);
53007@@ -2301,6 +2317,9 @@ long do_mount(const char *dev_name, const char *dir_name,
53008 dev_name, data_page);
53009 dput_out:
53010 path_put(&path);
53011+
53012+ gr_log_mount(dev_name, dir_name, retval);
53013+
53014 return retval;
53015 }
53016
53017@@ -2587,6 +2606,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
53018 if (error)
53019 goto out2;
53020
53021+ if (gr_handle_chroot_pivot()) {
53022+ error = -EPERM;
53023+ goto out2;
53024+ }
53025+
53026 get_fs_root(current->fs, &root);
53027 error = lock_mount(&old);
53028 if (error)
53029@@ -2790,7 +2814,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
53030 !nsown_capable(CAP_SYS_ADMIN))
53031 return -EPERM;
53032
53033- if (fs->users != 1)
53034+ if (atomic_read(&fs->users) != 1)
53035 return -EINVAL;
53036
53037 get_mnt_ns(mnt_ns);
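fs/namespace.c mixes the same two hook flavors: veto hooks (gr_handle_rofs_mount(), gr_handle_chroot_mount(), gr_handle_chroot_pivot()) that fail the syscall with -EPERM, and pure audit hooks (gr_log_mount(), gr_log_remount(), gr_log_unmount()) that record the operation together with its result after the fact. The audit hooks return nothing and, in the disabled configuration, should reduce to empty inlines along these lines (a sketch, not the patch's code):

/* Sketch of the disabled case: the audit hooks vanish entirely. */
static inline void gr_log_mount(const char *from, const char *to, int rv) { }
static inline void gr_log_remount(const char *devname, int rv) { }
static inline void gr_log_unmount(const char *devname, int rv) { }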
53038diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
53039index 59461c9..b17c57e 100644
53040--- a/fs/nfs/callback_xdr.c
53041+++ b/fs/nfs/callback_xdr.c
53042@@ -51,7 +51,7 @@ struct callback_op {
53043 callback_decode_arg_t decode_args;
53044 callback_encode_res_t encode_res;
53045 long res_maxsize;
53046-};
53047+} __do_const;
53048
53049 static struct callback_op callback_ops[];
53050
53051diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
53052index ebeb94c..ff35337 100644
53053--- a/fs/nfs/inode.c
53054+++ b/fs/nfs/inode.c
53055@@ -1042,16 +1042,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
53056 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
53057 }
53058
53059-static atomic_long_t nfs_attr_generation_counter;
53060+static atomic_long_unchecked_t nfs_attr_generation_counter;
53061
53062 static unsigned long nfs_read_attr_generation_counter(void)
53063 {
53064- return atomic_long_read(&nfs_attr_generation_counter);
53065+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
53066 }
53067
53068 unsigned long nfs_inc_attr_generation_counter(void)
53069 {
53070- return atomic_long_inc_return(&nfs_attr_generation_counter);
53071+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
53072 }
53073
53074 void nfs_fattr_init(struct nfs_fattr *fattr)
53075diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
53076index 9d1c5db..1e13db8 100644
53077--- a/fs/nfsd/nfs4proc.c
53078+++ b/fs/nfsd/nfs4proc.c
53079@@ -1097,7 +1097,7 @@ struct nfsd4_operation {
53080 nfsd4op_rsize op_rsize_bop;
53081 stateid_getter op_get_currentstateid;
53082 stateid_setter op_set_currentstateid;
53083-};
53084+} __do_const;
53085
53086 static struct nfsd4_operation nfsd4_ops[];
53087
53088diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
53089index 0dc1158..ccf0338 100644
53090--- a/fs/nfsd/nfs4xdr.c
53091+++ b/fs/nfsd/nfs4xdr.c
53092@@ -1456,7 +1456,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
53093
53094 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
53095
53096-static nfsd4_dec nfsd4_dec_ops[] = {
53097+static const nfsd4_dec nfsd4_dec_ops[] = {
53098 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
53099 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
53100 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
53101@@ -1496,7 +1496,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
53102 [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
53103 };
53104
53105-static nfsd4_dec nfsd41_dec_ops[] = {
53106+static const nfsd4_dec nfsd41_dec_ops[] = {
53107 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
53108 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
53109 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
53110@@ -1558,7 +1558,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
53111 };
53112
53113 struct nfsd4_minorversion_ops {
53114- nfsd4_dec *decoders;
53115+ const nfsd4_dec *decoders;
53116 int nops;
53117 };
53118
53119diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
53120index d586117..143d568 100644
53121--- a/fs/nfsd/vfs.c
53122+++ b/fs/nfsd/vfs.c
53123@@ -939,7 +939,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
53124 } else {
53125 oldfs = get_fs();
53126 set_fs(KERNEL_DS);
53127- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
53128+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
53129 set_fs(oldfs);
53130 }
53131
53132@@ -1025,7 +1025,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
53133
53134 /* Write the data. */
53135 oldfs = get_fs(); set_fs(KERNEL_DS);
53136- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
53137+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
53138 set_fs(oldfs);
53139 if (host_err < 0)
53140 goto out_nfserr;
53141@@ -1571,7 +1571,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
53142 */
53143
53144 oldfs = get_fs(); set_fs(KERNEL_DS);
53145- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
53146+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
53147 set_fs(oldfs);
53148
53149 if (host_err < 0)
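The nfsd hunks run vfs_readv()/vfs_writev() and ->readlink() on kernel buffers inside a get_fs()/set_fs(KERNEL_DS) window, so the __user casts are a fiction maintained for the prototypes rather than real user addresses. Swapping in __force_user makes that fiction explicit to sparse-style address-space checking: it is purely a cast annotation and generates no code. Roughly, assuming the conventional checker split:

/* Sketch: annotate a deliberate kernel->user address-space cast. */
#ifdef __CHECKER__
# define __force_user   __force __user
#else
# define __force_user
#endif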
53150diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
53151index fea6bd5..8ee9d81 100644
53152--- a/fs/nls/nls_base.c
53153+++ b/fs/nls/nls_base.c
53154@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
53155
53156 int register_nls(struct nls_table * nls)
53157 {
53158- struct nls_table ** tmp = &tables;
53159+ struct nls_table *tmp = tables;
53160
53161 if (nls->next)
53162 return -EBUSY;
53163
53164 spin_lock(&nls_lock);
53165- while (*tmp) {
53166- if (nls == *tmp) {
53167+ while (tmp) {
53168+ if (nls == tmp) {
53169 spin_unlock(&nls_lock);
53170 return -EBUSY;
53171 }
53172- tmp = &(*tmp)->next;
53173+ tmp = tmp->next;
53174 }
53175- nls->next = tables;
53176+ pax_open_kernel();
53177+ *(struct nls_table **)&nls->next = tables;
53178+ pax_close_kernel();
53179 tables = nls;
53180 spin_unlock(&nls_lock);
53181 return 0;
53182@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
53183
53184 int unregister_nls(struct nls_table * nls)
53185 {
53186- struct nls_table ** tmp = &tables;
53187+ struct nls_table * const * tmp = &tables;
53188
53189 spin_lock(&nls_lock);
53190 while (*tmp) {
53191 if (nls == *tmp) {
53192- *tmp = nls->next;
53193+ pax_open_kernel();
53194+ *(struct nls_table **)tmp = nls->next;
53195+ pax_close_kernel();
53196 spin_unlock(&nls_lock);
53197 return 0;
53198 }
53199diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
53200index 7424929..35f6be5 100644
53201--- a/fs/nls/nls_euc-jp.c
53202+++ b/fs/nls/nls_euc-jp.c
53203@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
53204 p_nls = load_nls("cp932");
53205
53206 if (p_nls) {
53207- table.charset2upper = p_nls->charset2upper;
53208- table.charset2lower = p_nls->charset2lower;
53209+ pax_open_kernel();
53210+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
53211+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
53212+ pax_close_kernel();
53213 return register_nls(&table);
53214 }
53215
53216diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
53217index e7bc1d7..06bd4bb 100644
53218--- a/fs/nls/nls_koi8-ru.c
53219+++ b/fs/nls/nls_koi8-ru.c
53220@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
53221 p_nls = load_nls("koi8-u");
53222
53223 if (p_nls) {
53224- table.charset2upper = p_nls->charset2upper;
53225- table.charset2lower = p_nls->charset2lower;
53226+ pax_open_kernel();
53227+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
53228+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
53229+ pax_close_kernel();
53230 return register_nls(&table);
53231 }
53232
53233diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
53234index 9ff4a5e..deb1f0f 100644
53235--- a/fs/notify/fanotify/fanotify_user.c
53236+++ b/fs/notify/fanotify/fanotify_user.c
53237@@ -251,8 +251,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
53238
53239 fd = fanotify_event_metadata.fd;
53240 ret = -EFAULT;
53241- if (copy_to_user(buf, &fanotify_event_metadata,
53242- fanotify_event_metadata.event_len))
53243+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
53244+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
53245 goto out_close_fd;
53246
53247 ret = prepare_for_access_response(group, event, fd);
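The fanotify change is a defensive bounds check: fanotify_event_metadata is a fixed-size structure on the kernel stack, and its event_len field is what gets copied out, so a miscomputed length must be capped at sizeof the structure or the copy walks off the stack into adjacent memory. The safe shape, generalized into a helper (a sketch, not the patch's code):

/* Sketch: cap a length field at the size of the object it describes
 * before copying that many bytes to userspace. */
static long copy_metadata_to_user(void __user *buf,
                                  const struct fanotify_event_metadata *md)
{
        if (md->event_len > sizeof(*md))
                return -EFAULT;         /* never read past *md */
        if (copy_to_user(buf, md, md->event_len))
                return -EFAULT;
        return md->event_len;
}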
53248diff --git a/fs/notify/notification.c b/fs/notify/notification.c
53249index 7b51b05..5ea5ef6 100644
53250--- a/fs/notify/notification.c
53251+++ b/fs/notify/notification.c
53252@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
53253 * get set to 0 so it will never get 'freed'
53254 */
53255 static struct fsnotify_event *q_overflow_event;
53256-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
53257+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
53258
53259 /**
53260 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
53261@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
53262 */
53263 u32 fsnotify_get_cookie(void)
53264 {
53265- return atomic_inc_return(&fsnotify_sync_cookie);
53266+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
53267 }
53268 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
53269
53270diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
53271index 99e3610..02c1068 100644
53272--- a/fs/ntfs/dir.c
53273+++ b/fs/ntfs/dir.c
53274@@ -1329,7 +1329,7 @@ find_next_index_buffer:
53275 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
53276 ~(s64)(ndir->itype.index.block_size - 1)));
53277 /* Bounds checks. */
53278- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
53279+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
53280 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
53281 "inode 0x%lx or driver bug.", vdir->i_ino);
53282 goto err_out;
53283diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
53284index 5b2d4f0..c6de396 100644
53285--- a/fs/ntfs/file.c
53286+++ b/fs/ntfs/file.c
53287@@ -2242,6 +2242,6 @@ const struct inode_operations ntfs_file_inode_ops = {
53288 #endif /* NTFS_RW */
53289 };
53290
53291-const struct file_operations ntfs_empty_file_ops = {};
53292+const struct file_operations ntfs_empty_file_ops __read_only;
53293
53294-const struct inode_operations ntfs_empty_inode_ops = {};
53295+const struct inode_operations ntfs_empty_inode_ops __read_only;
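Dropping the empty initializers in favor of __read_only moves the two placeholder operation tables into the patch's dedicated read-only data section, so they are write-protected at runtime instead of merely being const-qualified in the source. The marker's definition is elsewhere in the patch; a sketch of the conventional form (an assumption):

/* Sketch: pin an object in a section that KERNEXEC maps read-only. */
#ifdef CONFIG_PAX_KERNEXEC
#define __read_only __attribute__((__section__(".data..read_only")))
#else
#define __read_only
#endif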
53296diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
53297index a9f78c7..ed8a381 100644
53298--- a/fs/ocfs2/localalloc.c
53299+++ b/fs/ocfs2/localalloc.c
53300@@ -1279,7 +1279,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
53301 goto bail;
53302 }
53303
53304- atomic_inc(&osb->alloc_stats.moves);
53305+ atomic_inc_unchecked(&osb->alloc_stats.moves);
53306
53307 bail:
53308 if (handle)
53309diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
53310index d355e6e..578d905 100644
53311--- a/fs/ocfs2/ocfs2.h
53312+++ b/fs/ocfs2/ocfs2.h
53313@@ -235,11 +235,11 @@ enum ocfs2_vol_state
53314
53315 struct ocfs2_alloc_stats
53316 {
53317- atomic_t moves;
53318- atomic_t local_data;
53319- atomic_t bitmap_data;
53320- atomic_t bg_allocs;
53321- atomic_t bg_extends;
53322+ atomic_unchecked_t moves;
53323+ atomic_unchecked_t local_data;
53324+ atomic_unchecked_t bitmap_data;
53325+ atomic_unchecked_t bg_allocs;
53326+ atomic_unchecked_t bg_extends;
53327 };
53328
53329 enum ocfs2_local_alloc_state
53330diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
53331index b7e74b5..19c6536 100644
53332--- a/fs/ocfs2/suballoc.c
53333+++ b/fs/ocfs2/suballoc.c
53334@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
53335 mlog_errno(status);
53336 goto bail;
53337 }
53338- atomic_inc(&osb->alloc_stats.bg_extends);
53339+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
53340
53341 /* You should never ask for this much metadata */
53342 BUG_ON(bits_wanted >
53343@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
53344 mlog_errno(status);
53345 goto bail;
53346 }
53347- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53348+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53349
53350 *suballoc_loc = res.sr_bg_blkno;
53351 *suballoc_bit_start = res.sr_bit_offset;
53352@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
53353 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
53354 res->sr_bits);
53355
53356- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53357+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53358
53359 BUG_ON(res->sr_bits != 1);
53360
53361@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
53362 mlog_errno(status);
53363 goto bail;
53364 }
53365- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53366+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
53367
53368 BUG_ON(res.sr_bits != 1);
53369
53370@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
53371 cluster_start,
53372 num_clusters);
53373 if (!status)
53374- atomic_inc(&osb->alloc_stats.local_data);
53375+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
53376 } else {
53377 if (min_clusters > (osb->bitmap_cpg - 1)) {
53378 /* The only paths asking for contiguousness
53379@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
53380 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
53381 res.sr_bg_blkno,
53382 res.sr_bit_offset);
53383- atomic_inc(&osb->alloc_stats.bitmap_data);
53384+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
53385 *num_clusters = res.sr_bits;
53386 }
53387 }
53388diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
53389index 0e91ec2..f4b3fc6 100644
53390--- a/fs/ocfs2/super.c
53391+++ b/fs/ocfs2/super.c
53392@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
53393 "%10s => GlobalAllocs: %d LocalAllocs: %d "
53394 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
53395 "Stats",
53396- atomic_read(&osb->alloc_stats.bitmap_data),
53397- atomic_read(&osb->alloc_stats.local_data),
53398- atomic_read(&osb->alloc_stats.bg_allocs),
53399- atomic_read(&osb->alloc_stats.moves),
53400- atomic_read(&osb->alloc_stats.bg_extends));
53401+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
53402+ atomic_read_unchecked(&osb->alloc_stats.local_data),
53403+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
53404+ atomic_read_unchecked(&osb->alloc_stats.moves),
53405+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
53406
53407 out += snprintf(buf + out, len - out,
53408 "%10s => State: %u Descriptor: %llu Size: %u bits "
53409@@ -2121,11 +2121,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
53410 spin_lock_init(&osb->osb_xattr_lock);
53411 ocfs2_init_steal_slots(osb);
53412
53413- atomic_set(&osb->alloc_stats.moves, 0);
53414- atomic_set(&osb->alloc_stats.local_data, 0);
53415- atomic_set(&osb->alloc_stats.bitmap_data, 0);
53416- atomic_set(&osb->alloc_stats.bg_allocs, 0);
53417- atomic_set(&osb->alloc_stats.bg_extends, 0);
53418+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
53419+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
53420+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
53421+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
53422+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
53423
53424 /* Copy the blockcheck stats from the superblock probe */
53425 osb->osb_ecc_stats = *stats;
53426diff --git a/fs/open.c b/fs/open.c
53427index 9b33c0c..2ffcca2 100644
53428--- a/fs/open.c
53429+++ b/fs/open.c
53430@@ -31,6 +31,8 @@
53431 #include <linux/ima.h>
53432 #include <linux/dnotify.h>
53433
53434+#define CREATE_TRACE_POINTS
53435+#include <trace/events/fs.h>
53436 #include "internal.h"
53437
53438 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
53439@@ -101,6 +103,8 @@ long vfs_truncate(struct path *path, loff_t length)
53440 error = locks_verify_truncate(inode, NULL, length);
53441 if (!error)
53442 error = security_path_truncate(path);
53443+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
53444+ error = -EACCES;
53445 if (!error)
53446 error = do_truncate(path->dentry, length, 0, NULL);
53447
53448@@ -178,6 +182,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
53449 error = locks_verify_truncate(inode, f.file, length);
53450 if (!error)
53451 error = security_path_truncate(&f.file->f_path);
53452+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
53453+ error = -EACCES;
53454 if (!error)
53455 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
53456 sb_end_write(inode->i_sb);
53457@@ -373,6 +379,9 @@ retry:
53458 if (__mnt_is_readonly(path.mnt))
53459 res = -EROFS;
53460
53461+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
53462+ res = -EACCES;
53463+
53464 out_path_release:
53465 path_put(&path);
53466 if (retry_estale(res, lookup_flags)) {
53467@@ -404,6 +413,8 @@ retry:
53468 if (error)
53469 goto dput_and_out;
53470
53471+ gr_log_chdir(path.dentry, path.mnt);
53472+
53473 set_fs_pwd(current->fs, &path);
53474
53475 dput_and_out:
53476@@ -433,6 +444,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
53477 goto out_putf;
53478
53479 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
53480+
53481+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
53482+ error = -EPERM;
53483+
53484+ if (!error)
53485+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
53486+
53487 if (!error)
53488 set_fs_pwd(current->fs, &f.file->f_path);
53489 out_putf:
53490@@ -462,7 +480,13 @@ retry:
53491 if (error)
53492 goto dput_and_out;
53493
53494+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
53495+ goto dput_and_out;
53496+
53497 set_fs_root(current->fs, &path);
53498+
53499+ gr_handle_chroot_chdir(&path);
53500+
53501 error = 0;
53502 dput_and_out:
53503 path_put(&path);
53504@@ -484,6 +508,16 @@ static int chmod_common(struct path *path, umode_t mode)
53505 if (error)
53506 return error;
53507 mutex_lock(&inode->i_mutex);
53508+
53509+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
53510+ error = -EACCES;
53511+ goto out_unlock;
53512+ }
53513+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
53514+ error = -EACCES;
53515+ goto out_unlock;
53516+ }
53517+
53518 error = security_path_chmod(path, mode);
53519 if (error)
53520 goto out_unlock;
53521@@ -544,6 +578,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
53522 uid = make_kuid(current_user_ns(), user);
53523 gid = make_kgid(current_user_ns(), group);
53524
53525+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
53526+ return -EACCES;
53527+
53528 newattrs.ia_valid = ATTR_CTIME;
53529 if (user != (uid_t) -1) {
53530 if (!uid_valid(uid))
53531@@ -960,6 +997,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
53532 } else {
53533 fsnotify_open(f);
53534 fd_install(fd, f);
53535+ trace_do_sys_open(tmp->name, flags, mode);
53536 }
53537 }
53538 putname(tmp);
53539diff --git a/fs/pipe.c b/fs/pipe.c
53540index bd3479d..fb92c4d 100644
53541--- a/fs/pipe.c
53542+++ b/fs/pipe.c
53543@@ -438,9 +438,9 @@ redo:
53544 }
53545 if (bufs) /* More to do? */
53546 continue;
53547- if (!pipe->writers)
53548+ if (!atomic_read(&pipe->writers))
53549 break;
53550- if (!pipe->waiting_writers) {
53551+ if (!atomic_read(&pipe->waiting_writers)) {
53552 /* syscall merging: Usually we must not sleep
53553 * if O_NONBLOCK is set, or if we got some data.
53554 * But if a writer sleeps in kernel space, then
53555@@ -504,7 +504,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
53556 mutex_lock(&inode->i_mutex);
53557 pipe = inode->i_pipe;
53558
53559- if (!pipe->readers) {
53560+ if (!atomic_read(&pipe->readers)) {
53561 send_sig(SIGPIPE, current, 0);
53562 ret = -EPIPE;
53563 goto out;
53564@@ -553,7 +553,7 @@ redo1:
53565 for (;;) {
53566 int bufs;
53567
53568- if (!pipe->readers) {
53569+ if (!atomic_read(&pipe->readers)) {
53570 send_sig(SIGPIPE, current, 0);
53571 if (!ret)
53572 ret = -EPIPE;
53573@@ -644,9 +644,9 @@ redo2:
53574 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
53575 do_wakeup = 0;
53576 }
53577- pipe->waiting_writers++;
53578+ atomic_inc(&pipe->waiting_writers);
53579 pipe_wait(pipe);
53580- pipe->waiting_writers--;
53581+ atomic_dec(&pipe->waiting_writers);
53582 }
53583 out:
53584 mutex_unlock(&inode->i_mutex);
53585@@ -716,7 +716,7 @@ pipe_poll(struct file *filp, poll_table *wait)
53586 mask = 0;
53587 if (filp->f_mode & FMODE_READ) {
53588 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
53589- if (!pipe->writers && filp->f_version != pipe->w_counter)
53590+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
53591 mask |= POLLHUP;
53592 }
53593
53594@@ -726,7 +726,7 @@ pipe_poll(struct file *filp, poll_table *wait)
53595 * Most Unices do not set POLLERR for FIFOs but on Linux they
53596 * behave exactly like pipes for poll().
53597 */
53598- if (!pipe->readers)
53599+ if (!atomic_read(&pipe->readers))
53600 mask |= POLLERR;
53601 }
53602
53603@@ -740,10 +740,10 @@ pipe_release(struct inode *inode, int decr, int decw)
53604
53605 mutex_lock(&inode->i_mutex);
53606 pipe = inode->i_pipe;
53607- pipe->readers -= decr;
53608- pipe->writers -= decw;
53609+ atomic_sub(decr, &pipe->readers);
53610+ atomic_sub(decw, &pipe->writers);
53611
53612- if (!pipe->readers && !pipe->writers) {
53613+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
53614 free_pipe_info(inode);
53615 } else {
53616 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
53617@@ -833,7 +833,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
53618
53619 if (inode->i_pipe) {
53620 ret = 0;
53621- inode->i_pipe->readers++;
53622+ atomic_inc(&inode->i_pipe->readers);
53623 }
53624
53625 mutex_unlock(&inode->i_mutex);
53626@@ -850,7 +850,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
53627
53628 if (inode->i_pipe) {
53629 ret = 0;
53630- inode->i_pipe->writers++;
53631+ atomic_inc(&inode->i_pipe->writers);
53632 }
53633
53634 mutex_unlock(&inode->i_mutex);
53635@@ -868,9 +868,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
53636 if (inode->i_pipe) {
53637 ret = 0;
53638 if (filp->f_mode & FMODE_READ)
53639- inode->i_pipe->readers++;
53640+ atomic_inc(&inode->i_pipe->readers);
53641 if (filp->f_mode & FMODE_WRITE)
53642- inode->i_pipe->writers++;
53643+ atomic_inc(&inode->i_pipe->writers);
53644 }
53645
53646 mutex_unlock(&inode->i_mutex);
53647@@ -962,7 +962,7 @@ void free_pipe_info(struct inode *inode)
53648 inode->i_pipe = NULL;
53649 }
53650
53651-static struct vfsmount *pipe_mnt __read_mostly;
53652+struct vfsmount *pipe_mnt __read_mostly;
53653
53654 /*
53655 * pipefs_dname() is called from d_path().
53656@@ -992,7 +992,8 @@ static struct inode * get_pipe_inode(void)
53657 goto fail_iput;
53658 inode->i_pipe = pipe;
53659
53660- pipe->readers = pipe->writers = 1;
53661+ atomic_set(&pipe->readers, 1);
53662+ atomic_set(&pipe->writers, 1);
53663 inode->i_fop = &rdwr_pipefifo_fops;
53664
53665 /*
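fs/pipe.c (together with the fuse splice path earlier) converts pipe->readers, pipe->writers and pipe->waiting_writers from plain ints to atomic_t, rewriting every increment, decrement and test to the atomic API. Unlike the statistics counters converted to _unchecked above, these stay as checked atomic_t, presumably to put genuine reference-style counts under REFCOUNT overflow detection; a wrapped reference count is a textbook use-after-free primitive, as this userspace illustration of 32-bit wraparound shows:

/* 2^32 - 1 extra "gets" wrap a counter holding one real reference
 * back to zero, making a live object look unreferenced. */
#include <assert.h>

int main(void)
{
        unsigned int refs = 1;          /* one legitimate reference */
        unsigned long long i;

        for (i = 0; i < 0xffffffffULL; i++)
                refs++;                 /* attacker-driven increments */
        assert(refs == 0);              /* the next "put" frees a live object */
        return 0;
}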
53666diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
53667index 15af622..0e9f4467 100644
53668--- a/fs/proc/Kconfig
53669+++ b/fs/proc/Kconfig
53670@@ -30,12 +30,12 @@ config PROC_FS
53671
53672 config PROC_KCORE
53673 bool "/proc/kcore support" if !ARM
53674- depends on PROC_FS && MMU
53675+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
53676
53677 config PROC_VMCORE
53678 bool "/proc/vmcore support"
53679- depends on PROC_FS && CRASH_DUMP
53680- default y
53681+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
53682+ default n
53683 help
53684 Exports the dump image of crashed kernel in ELF format.
53685
53686@@ -59,8 +59,8 @@ config PROC_SYSCTL
53687 limited in memory.
53688
53689 config PROC_PAGE_MONITOR
53690- default y
53691- depends on PROC_FS && MMU
53692+ default n
53693+ depends on PROC_FS && MMU && !GRKERNSEC
53694 bool "Enable /proc page monitoring" if EXPERT
53695 help
53696 Various /proc files exist to monitor process memory utilization:
53697diff --git a/fs/proc/array.c b/fs/proc/array.c
53698index 6a91e6f..e54dbc14 100644
53699--- a/fs/proc/array.c
53700+++ b/fs/proc/array.c
53701@@ -60,6 +60,7 @@
53702 #include <linux/tty.h>
53703 #include <linux/string.h>
53704 #include <linux/mman.h>
53705+#include <linux/grsecurity.h>
53706 #include <linux/proc_fs.h>
53707 #include <linux/ioport.h>
53708 #include <linux/uaccess.h>
53709@@ -362,6 +363,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
53710 seq_putc(m, '\n');
53711 }
53712
53713+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53714+static inline void task_pax(struct seq_file *m, struct task_struct *p)
53715+{
53716+ if (p->mm)
53717+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
53718+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
53719+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
53720+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
53721+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
53722+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
53723+ else
53724+ seq_printf(m, "PaX:\t-----\n");
53725+}
53726+#endif
53727+
53728 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53729 struct pid *pid, struct task_struct *task)
53730 {
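task_pax() above adds a "PaX:" line to /proc/<pid>/status built from the mm's pax_flags: five fixed positions, each the feature's letter, uppercase when active for that task, so a line such as "PaX: PeMRs" reads as PAGEEXEC, MPROTECT and RANDMMAP on with EMUTRAMP and SEGMEXEC off, and kernel threads with no mm report "-----". That makes the per-task state trivially scriptable; a userspace check against exactly the format emitted above:

/* Report whether MPROTECT is active for the current task, parsed from
 * the "PaX:" line format shown above ('M' appears only when set). */
#include <stdio.h>
#include <string.h>

static int pax_mprotect_active(void)
{
        char line[256];
        FILE *f = fopen("/proc/self/status", "r");

        if (!f)
                return -1;
        while (fgets(line, sizeof(line), f)) {
                if (strncmp(line, "PaX:", 4) == 0) {
                        int on = strchr(line, 'M') != NULL;
                        fclose(f);
                        return on;
                }
        }
        fclose(f);
        return -1;      /* no PaX line: kernel without this patch */
}

int main(void)
{
        printf("MPROTECT active: %d\n", pax_mprotect_active());
        return 0;
}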
53731@@ -380,9 +396,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53732 task_cpus_allowed(m, task);
53733 cpuset_task_status_allowed(m, task);
53734 task_context_switch_counts(m, task);
53735+
53736+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53737+ task_pax(m, task);
53738+#endif
53739+
53740+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
53741+ task_grsec_rbac(m, task);
53742+#endif
53743+
53744 return 0;
53745 }
53746
53747+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53748+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53749+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
53750+ _mm->pax_flags & MF_PAX_SEGMEXEC))
53751+#endif
53752+
53753 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53754 struct pid *pid, struct task_struct *task, int whole)
53755 {
53756@@ -404,6 +435,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53757 char tcomm[sizeof(task->comm)];
53758 unsigned long flags;
53759
53760+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53761+ if (current->exec_id != m->exec_id) {
53762+ gr_log_badprocpid("stat");
53763+ return 0;
53764+ }
53765+#endif
53766+
53767 state = *get_task_state(task);
53768 vsize = eip = esp = 0;
53769 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
53770@@ -475,6 +513,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53771 gtime = task->gtime;
53772 }
53773
53774+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53775+ if (PAX_RAND_FLAGS(mm)) {
53776+ eip = 0;
53777+ esp = 0;
53778+ wchan = 0;
53779+ }
53780+#endif
53781+#ifdef CONFIG_GRKERNSEC_HIDESYM
53782+ wchan = 0;
53783+ eip = 0;
53784+ esp = 0;
53785+#endif
53786+
53787 /* scale priority and nice values from timeslices to -20..20 */
53788 /* to make it look like a "normal" Unix priority/nice value */
53789 priority = task_prio(task);
53790@@ -511,9 +562,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53791 seq_put_decimal_ull(m, ' ', vsize);
53792 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
53793 seq_put_decimal_ull(m, ' ', rsslim);
53794+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53795+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
53796+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
53797+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
53798+#else
53799 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
53800 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
53801 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
53802+#endif
53803 seq_put_decimal_ull(m, ' ', esp);
53804 seq_put_decimal_ull(m, ' ', eip);
53805 /* The signal information here is obsolete.
53806@@ -535,7 +592,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
53807 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
53808 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
53809
53810- if (mm && permitted) {
53811+ if (mm && permitted
53812+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53813+ && !PAX_RAND_FLAGS(mm)
53814+#endif
53815+ ) {
53816 seq_put_decimal_ull(m, ' ', mm->start_data);
53817 seq_put_decimal_ull(m, ' ', mm->end_data);
53818 seq_put_decimal_ull(m, ' ', mm->start_brk);
53819@@ -573,8 +634,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53820 struct pid *pid, struct task_struct *task)
53821 {
53822 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
53823- struct mm_struct *mm = get_task_mm(task);
53824+ struct mm_struct *mm;
53825
53826+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53827+ if (current->exec_id != m->exec_id) {
53828+ gr_log_badprocpid("statm");
53829+ return 0;
53830+ }
53831+#endif
53832+ mm = get_task_mm(task);
53833 if (mm) {
53834 size = task_statm(mm, &shared, &text, &data, &resident);
53835 mmput(mm);
53836@@ -597,6 +665,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53837 return 0;
53838 }
53839
53840+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53841+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
53842+{
53843+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
53844+}
53845+#endif
53846+
53847 #ifdef CONFIG_CHECKPOINT_RESTORE
53848 static struct pid *
53849 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
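
Two recurring devices first appear in this array.c section: a PaX flags line added to /proc/<pid>/status, and the PAX_RAND_FLAGS() test used throughout the patch to decide when a foreign task's addresses should be blanked. A small sketch of the status-line formatting; the flag values below are illustrative, not the kernel's MF_PAX_* definitions:

    #include <stdio.h>

    /* Userspace model of the task_pax() helper: each feature prints as an
     * upper-case letter when enabled, lower-case when disabled. */
    #define MF_PAX_PAGEEXEC 0x01UL
    #define MF_PAX_EMUTRAMP 0x02UL
    #define MF_PAX_MPROTECT 0x04UL
    #define MF_PAX_RANDMMAP 0x08UL
    #define MF_PAX_SEGMEXEC 0x10UL

    static void print_pax(unsigned long flags)
    {
        printf("PaX:\t%c%c%c%c%c\n",
               flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
               flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
               flags & MF_PAX_MPROTECT ? 'M' : 'm',
               flags & MF_PAX_RANDMMAP ? 'R' : 'r',
               flags & MF_PAX_SEGMEXEC ? 'S' : 's');
    }

    int main(void)
    {
        /* prints "PaX:	PeMRs": PAGEEXEC, MPROTECT and RANDMMAP on */
        print_pax(MF_PAX_PAGEEXEC | MF_PAX_MPROTECT | MF_PAX_RANDMMAP);
        return 0;
    }
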
53850diff --git a/fs/proc/base.c b/fs/proc/base.c
53851index 9b43ff77..ba3e990 100644
53852--- a/fs/proc/base.c
53853+++ b/fs/proc/base.c
53854@@ -111,6 +111,14 @@ struct pid_entry {
53855 union proc_op op;
53856 };
53857
53858+struct getdents_callback {
53859+ struct linux_dirent __user * current_dir;
53860+ struct linux_dirent __user * previous;
53861+ struct file * file;
53862+ int count;
53863+ int error;
53864+};
53865+
53866 #define NOD(NAME, MODE, IOP, FOP, OP) { \
53867 .name = (NAME), \
53868 .len = sizeof(NAME) - 1, \
53869@@ -208,6 +216,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
53870 if (!mm->arg_end)
53871 goto out_mm; /* Shh! No looking before we're done */
53872
53873+ if (gr_acl_handle_procpidmem(task))
53874+ goto out_mm;
53875+
53876 len = mm->arg_end - mm->arg_start;
53877
53878 if (len > PAGE_SIZE)
53879@@ -235,12 +246,28 @@ out:
53880 return res;
53881 }
53882
53883+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53884+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53885+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
53886+ _mm->pax_flags & MF_PAX_SEGMEXEC))
53887+#endif
53888+
53889 static int proc_pid_auxv(struct task_struct *task, char *buffer)
53890 {
53891 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
53892 int res = PTR_ERR(mm);
53893 if (mm && !IS_ERR(mm)) {
53894 unsigned int nwords = 0;
53895+
53896+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53897+ /* allow if we're currently ptracing this task */
53898+ if (PAX_RAND_FLAGS(mm) &&
53899+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
53900+ mmput(mm);
53901+ return 0;
53902+ }
53903+#endif
53904+
53905 do {
53906 nwords += 2;
53907 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
53908@@ -254,7 +281,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
53909 }
53910
53911
53912-#ifdef CONFIG_KALLSYMS
53913+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53914 /*
53915 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
53916 * Returns the resolved symbol. If that fails, simply return the address.
53917@@ -293,7 +320,7 @@ static void unlock_trace(struct task_struct *task)
53918 mutex_unlock(&task->signal->cred_guard_mutex);
53919 }
53920
53921-#ifdef CONFIG_STACKTRACE
53922+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53923
53924 #define MAX_STACK_TRACE_DEPTH 64
53925
53926@@ -485,7 +512,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
53927 return count;
53928 }
53929
53930-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53931+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53932 static int proc_pid_syscall(struct task_struct *task, char *buffer)
53933 {
53934 long nr;
53935@@ -514,7 +541,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
53936 /************************************************************************/
53937
53938 /* permission checks */
53939-static int proc_fd_access_allowed(struct inode *inode)
53940+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
53941 {
53942 struct task_struct *task;
53943 int allowed = 0;
53944@@ -524,7 +551,10 @@ static int proc_fd_access_allowed(struct inode *inode)
53945 */
53946 task = get_proc_task(inode);
53947 if (task) {
53948- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
53949+ if (log)
53950+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
53951+ else
53952+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
53953 put_task_struct(task);
53954 }
53955 return allowed;
53956@@ -555,10 +585,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
53957 struct task_struct *task,
53958 int hide_pid_min)
53959 {
53960+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53961+ return false;
53962+
53963+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53964+ rcu_read_lock();
53965+ {
53966+ const struct cred *tmpcred = current_cred();
53967+ const struct cred *cred = __task_cred(task);
53968+
53969+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
53970+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53971+ || in_group_p(grsec_proc_gid)
53972+#endif
53973+ ) {
53974+ rcu_read_unlock();
53975+ return true;
53976+ }
53977+ }
53978+ rcu_read_unlock();
53979+
53980+ if (!pid->hide_pid)
53981+ return false;
53982+#endif
53983+
53984 if (pid->hide_pid < hide_pid_min)
53985 return true;
53986 if (in_group_p(pid->pid_gid))
53987 return true;
53988+
53989 return ptrace_may_access(task, PTRACE_MODE_READ);
53990 }
53991
53992@@ -576,7 +631,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
53993 put_task_struct(task);
53994
53995 if (!has_perms) {
53996+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53997+ {
53998+#else
53999 if (pid->hide_pid == 2) {
54000+#endif
54001 /*
54002 * Let's make getdents(), stat(), and open()
54003 * consistent with each other. If a process
54004@@ -674,6 +733,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
54005 if (!task)
54006 return -ESRCH;
54007
54008+ if (gr_acl_handle_procpidmem(task)) {
54009+ put_task_struct(task);
54010+ return -EPERM;
54011+ }
54012+
54013 mm = mm_access(task, mode);
54014 put_task_struct(task);
54015
54016@@ -689,6 +753,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
54017
54018 file->private_data = mm;
54019
54020+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54021+ file->f_version = current->exec_id;
54022+#endif
54023+
54024 return 0;
54025 }
54026
54027@@ -710,6 +778,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
54028 ssize_t copied;
54029 char *page;
54030
54031+#ifdef CONFIG_GRKERNSEC
54032+ if (write)
54033+ return -EPERM;
54034+#endif
54035+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54036+ if (file->f_version != current->exec_id) {
54037+ gr_log_badprocpid("mem");
54038+ return 0;
54039+ }
54040+#endif
54041+
54042 if (!mm)
54043 return 0;
54044
54045@@ -814,6 +893,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
54046 if (!mm)
54047 return 0;
54048
54049+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54050+ if (file->f_version != current->exec_id) {
54051+ gr_log_badprocpid("environ");
54052+ return 0;
54053+ }
54054+#endif
54055+
54056 page = (char *)__get_free_page(GFP_TEMPORARY);
54057 if (!page)
54058 return -ENOMEM;
54059@@ -1429,7 +1515,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
54060 int error = -EACCES;
54061
54062 /* Are we allowed to snoop on the tasks file descriptors? */
54063- if (!proc_fd_access_allowed(inode))
54064+ if (!proc_fd_access_allowed(inode, 0))
54065 goto out;
54066
54067 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
54068@@ -1473,8 +1559,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
54069 struct path path;
54070
54071 /* Are we allowed to snoop on the tasks file descriptors? */
54072- if (!proc_fd_access_allowed(inode))
54073- goto out;
54074+ /* logging this is needed for learning on chromium to work properly,
54075+ but we don't want to flood the logs from 'ps' which does a readlink
54076+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
54077+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
54078+ */
54079+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
54080+ if (!proc_fd_access_allowed(inode, 0))
54081+ goto out;
54082+ } else {
54083+ if (!proc_fd_access_allowed(inode, 1))
54084+ goto out;
54085+ }
54086
54087 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
54088 if (error)
54089@@ -1524,7 +1620,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
54090 rcu_read_lock();
54091 cred = __task_cred(task);
54092 inode->i_uid = cred->euid;
54093+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54094+ inode->i_gid = grsec_proc_gid;
54095+#else
54096 inode->i_gid = cred->egid;
54097+#endif
54098 rcu_read_unlock();
54099 }
54100 security_task_to_inode(task, inode);
54101@@ -1560,10 +1660,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
54102 return -ENOENT;
54103 }
54104 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
54105+#ifdef CONFIG_GRKERNSEC_PROC_USER
54106+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
54107+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54108+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
54109+#endif
54110 task_dumpable(task)) {
54111 cred = __task_cred(task);
54112 stat->uid = cred->euid;
54113+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54114+ stat->gid = grsec_proc_gid;
54115+#else
54116 stat->gid = cred->egid;
54117+#endif
54118 }
54119 }
54120 rcu_read_unlock();
54121@@ -1601,11 +1710,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
54122
54123 if (task) {
54124 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
54125+#ifdef CONFIG_GRKERNSEC_PROC_USER
54126+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
54127+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54128+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
54129+#endif
54130 task_dumpable(task)) {
54131 rcu_read_lock();
54132 cred = __task_cred(task);
54133 inode->i_uid = cred->euid;
54134+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54135+ inode->i_gid = grsec_proc_gid;
54136+#else
54137 inode->i_gid = cred->egid;
54138+#endif
54139 rcu_read_unlock();
54140 } else {
54141 inode->i_uid = GLOBAL_ROOT_UID;
54142@@ -2058,6 +2176,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
54143 if (!task)
54144 goto out_no_task;
54145
54146+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54147+ goto out;
54148+
54149 /*
54150 * Yes, it does not scale. And it should not. Don't add
54151 * new entries into /proc/<tgid>/ without very good reasons.
54152@@ -2102,6 +2223,9 @@ static int proc_pident_readdir(struct file *filp,
54153 if (!task)
54154 goto out_no_task;
54155
54156+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54157+ goto out;
54158+
54159 ret = 0;
54160 i = filp->f_pos;
54161 switch (i) {
54162@@ -2515,7 +2639,7 @@ static const struct pid_entry tgid_base_stuff[] = {
54163 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
54164 #endif
54165 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
54166-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
54167+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54168 INF("syscall", S_IRUGO, proc_pid_syscall),
54169 #endif
54170 INF("cmdline", S_IRUGO, proc_pid_cmdline),
54171@@ -2540,10 +2664,10 @@ static const struct pid_entry tgid_base_stuff[] = {
54172 #ifdef CONFIG_SECURITY
54173 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
54174 #endif
54175-#ifdef CONFIG_KALLSYMS
54176+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54177 INF("wchan", S_IRUGO, proc_pid_wchan),
54178 #endif
54179-#ifdef CONFIG_STACKTRACE
54180+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54181 ONE("stack", S_IRUGO, proc_pid_stack),
54182 #endif
54183 #ifdef CONFIG_SCHEDSTATS
54184@@ -2577,6 +2701,9 @@ static const struct pid_entry tgid_base_stuff[] = {
54185 #ifdef CONFIG_HARDWALL
54186 INF("hardwall", S_IRUGO, proc_pid_hardwall),
54187 #endif
54188+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
54189+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
54190+#endif
54191 #ifdef CONFIG_USER_NS
54192 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
54193 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
54194@@ -2705,7 +2832,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
54195 if (!inode)
54196 goto out;
54197
54198+#ifdef CONFIG_GRKERNSEC_PROC_USER
54199+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
54200+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54201+ inode->i_gid = grsec_proc_gid;
54202+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
54203+#else
54204 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
54205+#endif
54206 inode->i_op = &proc_tgid_base_inode_operations;
54207 inode->i_fop = &proc_tgid_base_operations;
54208 inode->i_flags|=S_IMMUTABLE;
54209@@ -2743,7 +2877,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
54210 if (!task)
54211 goto out;
54212
54213+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
54214+ goto out_put_task;
54215+
54216 result = proc_pid_instantiate(dir, dentry, task, NULL);
54217+out_put_task:
54218 put_task_struct(task);
54219 out:
54220 return result;
54221@@ -2806,6 +2944,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
54222 static int fake_filldir(void *buf, const char *name, int namelen,
54223 loff_t offset, u64 ino, unsigned d_type)
54224 {
54225+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
54226+ __buf->error = -EINVAL;
54227 return 0;
54228 }
54229
54230@@ -2857,7 +2997,7 @@ static const struct pid_entry tid_base_stuff[] = {
54231 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
54232 #endif
54233 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
54234-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
54235+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
54236 INF("syscall", S_IRUGO, proc_pid_syscall),
54237 #endif
54238 INF("cmdline", S_IRUGO, proc_pid_cmdline),
54239@@ -2884,10 +3024,10 @@ static const struct pid_entry tid_base_stuff[] = {
54240 #ifdef CONFIG_SECURITY
54241 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
54242 #endif
54243-#ifdef CONFIG_KALLSYMS
54244+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54245 INF("wchan", S_IRUGO, proc_pid_wchan),
54246 #endif
54247-#ifdef CONFIG_STACKTRACE
54248+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
54249 ONE("stack", S_IRUGO, proc_pid_stack),
54250 #endif
54251 #ifdef CONFIG_SCHEDSTATS
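
Under GRKERNSEC_PROC_USER and GRKERNSEC_PROC_USERGROUP, the base.c hunks above tighten the /proc/<pid> directory mode from world-readable to owner-only or owner-plus-group. A quick sketch that prints the three resulting permission sets, using the userspace S_I* macros (same octal values as the kernel's):

    #include <stdio.h>
    #include <sys/stat.h>

    /* The three /proc/<pid> directory modes selected above, in octal. */
    int main(void)
    {
        printf("GRKERNSEC_PROC_USER:      %04o\n",
               S_IRUSR | S_IXUSR);                      /* 0500 dr-x------ */
        printf("GRKERNSEC_PROC_USERGROUP: %04o\n",
               S_IRUSR | S_IRGRP | S_IXUSR | S_IXGRP);  /* 0550 dr-xr-x--- */
        printf("default:                  %04o\n",
               S_IRUSR | S_IRGRP | S_IROTH |
               S_IXUSR | S_IXGRP | S_IXOTH);            /* 0555 dr-xr-xr-x */
        return 0;
    }
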
54252diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
54253index 82676e3..5f8518a 100644
54254--- a/fs/proc/cmdline.c
54255+++ b/fs/proc/cmdline.c
54256@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
54257
54258 static int __init proc_cmdline_init(void)
54259 {
54260+#ifdef CONFIG_GRKERNSEC_PROC_ADD
54261+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
54262+#else
54263 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
54264+#endif
54265 return 0;
54266 }
54267 module_init(proc_cmdline_init);
54268diff --git a/fs/proc/devices.c b/fs/proc/devices.c
54269index b143471..bb105e5 100644
54270--- a/fs/proc/devices.c
54271+++ b/fs/proc/devices.c
54272@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
54273
54274 static int __init proc_devices_init(void)
54275 {
54276+#ifdef CONFIG_GRKERNSEC_PROC_ADD
54277+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
54278+#else
54279 proc_create("devices", 0, NULL, &proc_devinfo_operations);
54280+#endif
54281 return 0;
54282 }
54283 module_init(proc_devices_init);
54284diff --git a/fs/proc/fd.c b/fs/proc/fd.c
54285index d7a4a28..0201742 100644
54286--- a/fs/proc/fd.c
54287+++ b/fs/proc/fd.c
54288@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
54289 if (!task)
54290 return -ENOENT;
54291
54292- files = get_files_struct(task);
54293+ if (!gr_acl_handle_procpidmem(task))
54294+ files = get_files_struct(task);
54295 put_task_struct(task);
54296
54297 if (files) {
54298@@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
54299 */
54300 int proc_fd_permission(struct inode *inode, int mask)
54301 {
54302+ struct task_struct *task;
54303 int rv = generic_permission(inode, mask);
54304- if (rv == 0)
54305- return 0;
54306+
54307 if (task_pid(current) == proc_pid(inode))
54308 rv = 0;
54309+
54310+ task = get_proc_task(inode);
54311+ if (task == NULL)
54312+ return rv;
54313+
54314+ if (gr_acl_handle_procpidmem(task))
54315+ rv = -EACCES;
54316+
54317+ put_task_struct(task);
54318+
54319 return rv;
54320 }
54321
54322diff --git a/fs/proc/inode.c b/fs/proc/inode.c
54323index 439ae688..c21ac36 100644
54324--- a/fs/proc/inode.c
54325+++ b/fs/proc/inode.c
54326@@ -21,11 +21,17 @@
54327 #include <linux/seq_file.h>
54328 #include <linux/slab.h>
54329 #include <linux/mount.h>
54330+#include <linux/grsecurity.h>
54331
54332 #include <asm/uaccess.h>
54333
54334 #include "internal.h"
54335
54336+#ifdef CONFIG_PROC_SYSCTL
54337+extern const struct inode_operations proc_sys_inode_operations;
54338+extern const struct inode_operations proc_sys_dir_operations;
54339+#endif
54340+
54341 static void proc_evict_inode(struct inode *inode)
54342 {
54343 struct proc_dir_entry *de;
54344@@ -53,6 +59,13 @@ static void proc_evict_inode(struct inode *inode)
54345 ns = PROC_I(inode)->ns;
54346 if (ns_ops && ns)
54347 ns_ops->put(ns);
54348+
54349+#ifdef CONFIG_PROC_SYSCTL
54350+ if (inode->i_op == &proc_sys_inode_operations ||
54351+ inode->i_op == &proc_sys_dir_operations)
54352+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
54353+#endif
54354+
54355 }
54356
54357 static struct kmem_cache * proc_inode_cachep;
54358@@ -457,7 +470,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
54359 if (de->mode) {
54360 inode->i_mode = de->mode;
54361 inode->i_uid = de->uid;
54362+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
54363+ inode->i_gid = grsec_proc_gid;
54364+#else
54365 inode->i_gid = de->gid;
54366+#endif
54367 }
54368 if (de->size)
54369 inode->i_size = de->size;
54370diff --git a/fs/proc/internal.h b/fs/proc/internal.h
54371index 252544c..04395b9 100644
54372--- a/fs/proc/internal.h
54373+++ b/fs/proc/internal.h
54374@@ -55,6 +55,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
54375 struct pid *pid, struct task_struct *task);
54376 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
54377 struct pid *pid, struct task_struct *task);
54378+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
54379+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
54380+#endif
54381 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
54382
54383 extern const struct file_operations proc_tid_children_operations;
54384diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
54385index e96d4f1..8b116ed 100644
54386--- a/fs/proc/kcore.c
54387+++ b/fs/proc/kcore.c
54388@@ -480,9 +480,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
54389 * the addresses in the elf_phdr on our list.
54390 */
54391 start = kc_offset_to_vaddr(*fpos - elf_buflen);
54392- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
54393+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
54394+ if (tsz > buflen)
54395 tsz = buflen;
54396-
54397+
54398 while (buflen) {
54399 struct kcore_list *m;
54400
54401@@ -511,20 +512,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
54402 kfree(elf_buf);
54403 } else {
54404 if (kern_addr_valid(start)) {
54405- unsigned long n;
54406+ char *elf_buf;
54407+ mm_segment_t oldfs;
54408
54409- n = copy_to_user(buffer, (char *)start, tsz);
54410- /*
54411- * We cannot distinguish between fault on source
54412- * and fault on destination. When this happens
54413- * we clear too and hope it will trigger the
54414- * EFAULT again.
54415- */
54416- if (n) {
54417- if (clear_user(buffer + tsz - n,
54418- n))
54419+ elf_buf = kmalloc(tsz, GFP_KERNEL);
54420+ if (!elf_buf)
54421+ return -ENOMEM;
54422+ oldfs = get_fs();
54423+ set_fs(KERNEL_DS);
54424+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
54425+ set_fs(oldfs);
54426+ if (copy_to_user(buffer, elf_buf, tsz)) {
54427+ kfree(elf_buf);
54428 return -EFAULT;
54429+ }
54430 }
54431+ set_fs(oldfs);
54432+ kfree(elf_buf);
54433 } else {
54434 if (clear_user(buffer, tsz))
54435 return -EFAULT;
54436@@ -544,6 +548,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
54437
54438 static int open_kcore(struct inode *inode, struct file *filp)
54439 {
54440+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
54441+ return -EPERM;
54442+#endif
54443 if (!capable(CAP_SYS_RAWIO))
54444 return -EPERM;
54445 if (kcore_need_update)
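
The read_kcore() hunk replaces a direct copy_to_user() from a kernel virtual address with a copy staged through a kmalloc'd bounce buffer, so a fault on the kernel source can be told apart from a fault on the user destination. A deterministic userspace sketch of the same staging pattern; read_source() is a hypothetical stand-in for the copy from a kernel address:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* Read the source into a scratch ("bounce") buffer first; only write
     * the destination once the source read is known good. */
    static int read_source(char *dst, size_t len, int fail)
    {
        if (fail)
            return -EFAULT;          /* simulated fault on the source */
        memset(dst, 'k', len);
        return 0;
    }

    static int copy_via_bounce(char *user_dst, size_t len, int fail)
    {
        char *bounce = malloc(len);
        if (!bounce)
            return -ENOMEM;
        int rc = read_source(bounce, len, fail);
        if (rc == 0)
            memcpy(user_dst, bounce, len); /* destination touched only on success */
        free(bounce);
        return rc;
    }

    int main(void)
    {
        char out[8] = "........";
        printf("good copy: %d, out=%.8s\n", copy_via_bounce(out, 8, 0), out);
        memcpy(out, "........", 8);
        printf("bad  copy: %d, out=%.8s\n", copy_via_bounce(out, 8, 1), out);
        return 0;
    }

On the failing path the destination keeps its old contents, which is the property the kernel change restores.
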
54446diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
54447index 80e4645..53e5fcf 100644
54448--- a/fs/proc/meminfo.c
54449+++ b/fs/proc/meminfo.c
54450@@ -158,7 +158,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
54451 vmi.used >> 10,
54452 vmi.largest_chunk >> 10
54453 #ifdef CONFIG_MEMORY_FAILURE
54454- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
54455+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
54456 #endif
54457 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
54458 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
54459diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
54460index b1822dd..df622cb 100644
54461--- a/fs/proc/nommu.c
54462+++ b/fs/proc/nommu.c
54463@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
54464 if (len < 1)
54465 len = 1;
54466 seq_printf(m, "%*c", len, ' ');
54467- seq_path(m, &file->f_path, "");
54468+ seq_path(m, &file->f_path, "\n\\");
54469 }
54470
54471 seq_putc(m, '\n');
54472diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
54473index fe72cd0..21b52ff 100644
54474--- a/fs/proc/proc_net.c
54475+++ b/fs/proc/proc_net.c
54476@@ -23,6 +23,7 @@
54477 #include <linux/nsproxy.h>
54478 #include <net/net_namespace.h>
54479 #include <linux/seq_file.h>
54480+#include <linux/grsecurity.h>
54481
54482 #include "internal.h"
54483
54484@@ -105,6 +106,17 @@ static struct net *get_proc_task_net(struct inode *dir)
54485 struct task_struct *task;
54486 struct nsproxy *ns;
54487 struct net *net = NULL;
54488+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54489+ const struct cred *cred = current_cred();
54490+#endif
54491+
54492+#ifdef CONFIG_GRKERNSEC_PROC_USER
54493+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
54494+ return net;
54495+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54496+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
54497+ return net;
54498+#endif
54499
54500 rcu_read_lock();
54501 task = pid_task(proc_pid(dir), PIDTYPE_PID);
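
get_proc_task_net() above returns NULL, leaving /proc/<pid>/net empty, unless the caller is root or, under GRKERNSEC_PROC_USERGROUP, a member of the configured proc gid. A toy model of that gate; uid 1000 is just an example value:

    #include <stdbool.h>
    #include <stdio.h>

    /* Non-root callers see the namespace only when in the proc group. */
    static bool net_visible(unsigned int fsuid, bool in_proc_group)
    {
        if (fsuid == 0)             /* GLOBAL_ROOT_UID */
            return true;
        return in_proc_group;       /* in_group_p(grsec_proc_gid) */
    }

    int main(void)
    {
        printf("root            -> %d\n", net_visible(0, false));
        printf("user, in group  -> %d\n", net_visible(1000, true));
        printf("user, no group  -> %d\n", net_visible(1000, false));
        return 0;
    }
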
54502diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
54503index 1827d88..43b0279 100644
54504--- a/fs/proc/proc_sysctl.c
54505+++ b/fs/proc/proc_sysctl.c
54506@@ -12,11 +12,15 @@
54507 #include <linux/module.h>
54508 #include "internal.h"
54509
54510+extern int gr_handle_chroot_sysctl(const int op);
54511+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
54512+ const int op);
54513+
54514 static const struct dentry_operations proc_sys_dentry_operations;
54515 static const struct file_operations proc_sys_file_operations;
54516-static const struct inode_operations proc_sys_inode_operations;
54517+const struct inode_operations proc_sys_inode_operations;
54518 static const struct file_operations proc_sys_dir_file_operations;
54519-static const struct inode_operations proc_sys_dir_operations;
54520+const struct inode_operations proc_sys_dir_operations;
54521
54522 void proc_sys_poll_notify(struct ctl_table_poll *poll)
54523 {
54524@@ -466,6 +470,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
54525
54526 err = NULL;
54527 d_set_d_op(dentry, &proc_sys_dentry_operations);
54528+
54529+ gr_handle_proc_create(dentry, inode);
54530+
54531 d_add(dentry, inode);
54532
54533 out:
54534@@ -481,6 +488,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
54535 struct inode *inode = filp->f_path.dentry->d_inode;
54536 struct ctl_table_header *head = grab_header(inode);
54537 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
54538+ int op = write ? MAY_WRITE : MAY_READ;
54539 ssize_t error;
54540 size_t res;
54541
54542@@ -492,7 +500,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
54543 * and won't be until we finish.
54544 */
54545 error = -EPERM;
54546- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
54547+ if (sysctl_perm(head, table, op))
54548 goto out;
54549
54550 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
54551@@ -500,6 +508,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
54552 if (!table->proc_handler)
54553 goto out;
54554
54555+#ifdef CONFIG_GRKERNSEC
54556+ error = -EPERM;
54557+ if (gr_handle_chroot_sysctl(op))
54558+ goto out;
54559+ dget(filp->f_path.dentry);
54560+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
54561+ dput(filp->f_path.dentry);
54562+ goto out;
54563+ }
54564+ dput(filp->f_path.dentry);
54565+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
54566+ goto out;
54567+ if (write && !capable(CAP_SYS_ADMIN))
54568+ goto out;
54569+#endif
54570+
54571 /* careful: calling conventions are nasty here */
54572 res = count;
54573 error = table->proc_handler(table, write, buf, &res, ppos);
54574@@ -597,6 +621,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
54575 return -ENOMEM;
54576 } else {
54577 d_set_d_op(child, &proc_sys_dentry_operations);
54578+
54579+ gr_handle_proc_create(child, inode);
54580+
54581 d_add(child, inode);
54582 }
54583 } else {
54584@@ -640,6 +667,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
54585 if ((*pos)++ < file->f_pos)
54586 return 0;
54587
54588+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
54589+ return 0;
54590+
54591 if (unlikely(S_ISLNK(table->mode)))
54592 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
54593 else
54594@@ -750,6 +780,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
54595 if (IS_ERR(head))
54596 return PTR_ERR(head);
54597
54598+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
54599+ return -ENOENT;
54600+
54601 generic_fillattr(inode, stat);
54602 if (table)
54603 stat->mode = (stat->mode & S_IFMT) | table->mode;
54604@@ -772,13 +805,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
54605 .llseek = generic_file_llseek,
54606 };
54607
54608-static const struct inode_operations proc_sys_inode_operations = {
54609+const struct inode_operations proc_sys_inode_operations = {
54610 .permission = proc_sys_permission,
54611 .setattr = proc_sys_setattr,
54612 .getattr = proc_sys_getattr,
54613 };
54614
54615-static const struct inode_operations proc_sys_dir_operations = {
54616+const struct inode_operations proc_sys_dir_operations = {
54617 .lookup = proc_sys_lookup,
54618 .permission = proc_sys_permission,
54619 .setattr = proc_sys_setattr,
54620@@ -854,7 +887,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
54621 static struct ctl_dir *new_dir(struct ctl_table_set *set,
54622 const char *name, int namelen)
54623 {
54624- struct ctl_table *table;
54625+ ctl_table_no_const *table;
54626 struct ctl_dir *new;
54627 struct ctl_node *node;
54628 char *new_name;
54629@@ -866,7 +899,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
54630 return NULL;
54631
54632 node = (struct ctl_node *)(new + 1);
54633- table = (struct ctl_table *)(node + 1);
54634+ table = (ctl_table_no_const *)(node + 1);
54635 new_name = (char *)(table + 2);
54636 memcpy(new_name, name, namelen);
54637 new_name[namelen] = '\0';
54638@@ -1035,7 +1068,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
54639 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
54640 struct ctl_table_root *link_root)
54641 {
54642- struct ctl_table *link_table, *entry, *link;
54643+ ctl_table_no_const *link_table, *link;
54644+ struct ctl_table *entry;
54645 struct ctl_table_header *links;
54646 struct ctl_node *node;
54647 char *link_name;
54648@@ -1058,7 +1092,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
54649 return NULL;
54650
54651 node = (struct ctl_node *)(links + 1);
54652- link_table = (struct ctl_table *)(node + nr_entries);
54653+ link_table = (ctl_table_no_const *)(node + nr_entries);
54654 link_name = (char *)&link_table[nr_entries + 1];
54655
54656 for (link = link_table, entry = table; entry->procname; link++, entry++) {
54657@@ -1306,8 +1340,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
54658 struct ctl_table_header ***subheader, struct ctl_table_set *set,
54659 struct ctl_table *table)
54660 {
54661- struct ctl_table *ctl_table_arg = NULL;
54662- struct ctl_table *entry, *files;
54663+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
54664+ struct ctl_table *entry;
54665 int nr_files = 0;
54666 int nr_dirs = 0;
54667 int err = -ENOMEM;
54668@@ -1319,10 +1353,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
54669 nr_files++;
54670 }
54671
54672- files = table;
54673 /* If there are mixed files and directories we need a new table */
54674 if (nr_dirs && nr_files) {
54675- struct ctl_table *new;
54676+ ctl_table_no_const *new;
54677 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
54678 GFP_KERNEL);
54679 if (!files)
54680@@ -1340,7 +1373,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
54681 /* Register everything except a directory full of subdirectories */
54682 if (nr_files || !nr_dirs) {
54683 struct ctl_table_header *header;
54684- header = __register_sysctl_table(set, path, files);
54685+ header = __register_sysctl_table(set, path, files ? files : table);
54686 if (!header) {
54687 kfree(ctl_table_arg);
54688 goto out;
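
proc_sys_call_handler() gains a chain of veto points: a chroot check, a per-name modification check, an ACL open check, and a blanket CAP_SYS_ADMIN requirement for writes. A compact model of that layering; every *_ok() helper below is a hypothetical stand-in for the corresponding gr_* hook or capable() call:

    #include <stdbool.h>
    #include <stdio.h>

    enum { MAY_WRITE = 2, MAY_READ = 4 };   /* same values as the kernel's */

    static bool chroot_ok(int op)            { (void)op; return true; }
    static bool mod_ok(const char *d, const char *n, int op)
                                             { (void)d; (void)n; (void)op; return true; }
    static bool acl_open_ok(int op)          { (void)op; return true; }
    static bool capable_admin(void)          { return false; }

    static int sysctl_handler(const char *dir, const char *name, bool write)
    {
        int op = write ? MAY_WRITE : MAY_READ;

        /* any hook can veto; writes additionally need admin capability */
        if (!chroot_ok(op) || !mod_ok(dir, name, op) || !acl_open_ok(op))
            return -1;              /* -EPERM in the kernel */
        if (write && !capable_admin())
            return -1;
        return 0;                   /* fall through to the proc_handler */
    }

    int main(void)
    {
        printf("read  -> %d\n", sysctl_handler("kernel", "ostype", false));
        printf("write -> %d\n", sysctl_handler("kernel", "ostype", true));
        return 0;
    }
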
54689diff --git a/fs/proc/root.c b/fs/proc/root.c
54690index c6e9fac..a740964 100644
54691--- a/fs/proc/root.c
54692+++ b/fs/proc/root.c
54693@@ -176,7 +176,15 @@ void __init proc_root_init(void)
54694 #ifdef CONFIG_PROC_DEVICETREE
54695 proc_device_tree_init();
54696 #endif
54697+#ifdef CONFIG_GRKERNSEC_PROC_ADD
54698+#ifdef CONFIG_GRKERNSEC_PROC_USER
54699+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
54700+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54701+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
54702+#endif
54703+#else
54704 proc_mkdir("bus", NULL);
54705+#endif
54706 proc_sys_init();
54707 }
54708
54709diff --git a/fs/proc/self.c b/fs/proc/self.c
54710index aa5cc3b..c91a5d0 100644
54711--- a/fs/proc/self.c
54712+++ b/fs/proc/self.c
54713@@ -37,7 +37,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
54714 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
54715 void *cookie)
54716 {
54717- char *s = nd_get_link(nd);
54718+ const char *s = nd_get_link(nd);
54719 if (!IS_ERR(s))
54720 kfree(s);
54721 }
54722diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
54723index ca5ce7f..02c1cf0 100644
54724--- a/fs/proc/task_mmu.c
54725+++ b/fs/proc/task_mmu.c
54726@@ -11,12 +11,19 @@
54727 #include <linux/rmap.h>
54728 #include <linux/swap.h>
54729 #include <linux/swapops.h>
54730+#include <linux/grsecurity.h>
54731
54732 #include <asm/elf.h>
54733 #include <asm/uaccess.h>
54734 #include <asm/tlbflush.h>
54735 #include "internal.h"
54736
54737+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54738+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
54739+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
54740+ _mm->pax_flags & MF_PAX_SEGMEXEC))
54741+#endif
54742+
54743 void task_mem(struct seq_file *m, struct mm_struct *mm)
54744 {
54745 unsigned long data, text, lib, swap;
54746@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54747 "VmExe:\t%8lu kB\n"
54748 "VmLib:\t%8lu kB\n"
54749 "VmPTE:\t%8lu kB\n"
54750- "VmSwap:\t%8lu kB\n",
54751- hiwater_vm << (PAGE_SHIFT-10),
54752+ "VmSwap:\t%8lu kB\n"
54753+
54754+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54755+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
54756+#endif
54757+
54758+ ,hiwater_vm << (PAGE_SHIFT-10),
54759 total_vm << (PAGE_SHIFT-10),
54760 mm->locked_vm << (PAGE_SHIFT-10),
54761 mm->pinned_vm << (PAGE_SHIFT-10),
54762@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54763 data << (PAGE_SHIFT-10),
54764 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
54765 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
54766- swap << (PAGE_SHIFT-10));
54767+ swap << (PAGE_SHIFT-10)
54768+
54769+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
54770+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54771+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
54772+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
54773+#else
54774+ , mm->context.user_cs_base
54775+ , mm->context.user_cs_limit
54776+#endif
54777+#endif
54778+
54779+ );
54780 }
54781
54782 unsigned long task_vsize(struct mm_struct *mm)
54783@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
54784 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
54785 }
54786
54787- /* We don't show the stack guard page in /proc/maps */
54788+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54789+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
54790+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
54791+#else
54792 start = vma->vm_start;
54793- if (stack_guard_page_start(vma, start))
54794- start += PAGE_SIZE;
54795 end = vma->vm_end;
54796- if (stack_guard_page_end(vma, end))
54797- end -= PAGE_SIZE;
54798+#endif
54799
54800 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
54801 start,
54802@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
54803 flags & VM_WRITE ? 'w' : '-',
54804 flags & VM_EXEC ? 'x' : '-',
54805 flags & VM_MAYSHARE ? 's' : 'p',
54806+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54807+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
54808+#else
54809 pgoff,
54810+#endif
54811 MAJOR(dev), MINOR(dev), ino, &len);
54812
54813 /*
54814@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
54815 */
54816 if (file) {
54817 pad_len_spaces(m, len);
54818- seq_path(m, &file->f_path, "\n");
54819+ seq_path(m, &file->f_path, "\n\\");
54820 goto done;
54821 }
54822
54823@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
54824 * Thread stack in /proc/PID/task/TID/maps or
54825 * the main process stack.
54826 */
54827- if (!is_pid || (vma->vm_start <= mm->start_stack &&
54828- vma->vm_end >= mm->start_stack)) {
54829+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
54830+ (vma->vm_start <= mm->start_stack &&
54831+ vma->vm_end >= mm->start_stack)) {
54832 name = "[stack]";
54833 } else {
54834 /* Thread stack in /proc/PID/maps */
54835@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
54836 struct proc_maps_private *priv = m->private;
54837 struct task_struct *task = priv->task;
54838
54839+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54840+ if (current->exec_id != m->exec_id) {
54841+ gr_log_badprocpid("maps");
54842+ return 0;
54843+ }
54844+#endif
54845+
54846 show_map_vma(m, vma, is_pid);
54847
54848 if (m->count < m->size) /* vma is copied successfully */
54849@@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
54850 .private = &mss,
54851 };
54852
54853+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54854+ if (current->exec_id != m->exec_id) {
54855+ gr_log_badprocpid("smaps");
54856+ return 0;
54857+ }
54858+#endif
54859 memset(&mss, 0, sizeof mss);
54860- mss.vma = vma;
54861- /* mmap_sem is held in m_start */
54862- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
54863- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
54864-
54865+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54866+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
54867+#endif
54868+ mss.vma = vma;
54869+ /* mmap_sem is held in m_start */
54870+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
54871+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
54872+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54873+ }
54874+#endif
54875 show_map_vma(m, vma, is_pid);
54876
54877 seq_printf(m,
54878@@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
54879 "KernelPageSize: %8lu kB\n"
54880 "MMUPageSize: %8lu kB\n"
54881 "Locked: %8lu kB\n",
54882+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54883+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
54884+#else
54885 (vma->vm_end - vma->vm_start) >> 10,
54886+#endif
54887 mss.resident >> 10,
54888 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
54889 mss.shared_clean >> 10,
54890@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
54891 int n;
54892 char buffer[50];
54893
54894+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54895+ if (current->exec_id != m->exec_id) {
54896+ gr_log_badprocpid("numa_maps");
54897+ return 0;
54898+ }
54899+#endif
54900+
54901 if (!mm)
54902 return 0;
54903
54904@@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
54905 mpol_to_str(buffer, sizeof(buffer), pol);
54906 mpol_cond_put(pol);
54907
54908+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54909+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
54910+#else
54911 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
54912+#endif
54913
54914 if (file) {
54915 seq_printf(m, " file=");
54916- seq_path(m, &file->f_path, "\n\t= ");
54917+ seq_path(m, &file->f_path, "\n\t\\= ");
54918 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
54919 seq_printf(m, " heap");
54920 } else {
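
In show_map_vma() and the related show functions above, addresses, offsets and sizes for a randomized foreign mm (PAX_RAND_FLAGS() true) print as zero instead of their real values. A sketch of the masking applied to one maps-style line, with the field layout abbreviated from the real /proc/<pid>/maps output:

    #include <stdio.h>

    /* When mask is set, the layout-revealing fields are zeroed. */
    static void show_vma(unsigned long start, unsigned long end,
                         unsigned long pgoff, int mask)
    {
        if (mask)
            start = end = pgoff = 0;
        printf("%08lx-%08lx r-xp %08lx 08:01 1234\n", start, end, pgoff);
    }

    int main(void)
    {
        show_vma(0x400000UL, 0x452000UL, 0, 0); /* own task: real layout */
        show_vma(0x400000UL, 0x452000UL, 0, 1); /* foreign, randomized: zeroed */
        return 0;
    }
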
54921diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
54922index 1ccfa53..0848f95 100644
54923--- a/fs/proc/task_nommu.c
54924+++ b/fs/proc/task_nommu.c
54925@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
54926 else
54927 bytes += kobjsize(mm);
54928
54929- if (current->fs && current->fs->users > 1)
54930+ if (current->fs && atomic_read(&current->fs->users) > 1)
54931 sbytes += kobjsize(current->fs);
54932 else
54933 bytes += kobjsize(current->fs);
54934@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
54935
54936 if (file) {
54937 pad_len_spaces(m, len);
54938- seq_path(m, &file->f_path, "");
54939+ seq_path(m, &file->f_path, "\n\\");
54940 } else if (mm) {
54941 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
54942
54943diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
54944index 16e8abb..2dcf914 100644
54945--- a/fs/quota/netlink.c
54946+++ b/fs/quota/netlink.c
54947@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
54948 void quota_send_warning(struct kqid qid, dev_t dev,
54949 const char warntype)
54950 {
54951- static atomic_t seq;
54952+ static atomic_unchecked_t seq;
54953 struct sk_buff *skb;
54954 void *msg_head;
54955 int ret;
54956@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
54957 "VFS: Not enough memory to send quota warning.\n");
54958 return;
54959 }
54960- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
54961+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
54962 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
54963 if (!msg_head) {
54964 printk(KERN_ERR
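
The quota warning sequence number becomes atomic_unchecked_t: under PaX's refcount protection, a plain atomic_t that overflows is treated as a bug, so counters where wraparound is benign are marked unchecked to opt out. A sketch of such a wrapping sequence counter, with C11 atomics standing in for the kernel's atomic_add_return_unchecked():

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* A pure sequence number: overflow here is harmless, unlike in a
     * refcount, so no overflow check is wanted. */
    int main(void)
    {
        atomic_uint seq = UINT_MAX - 1;     /* about to wrap */
        for (int i = 0; i < 3; i++)
            printf("message seq %u\n", atomic_fetch_add(&seq, 1) + 1);
        return 0;
    }
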
54965diff --git a/fs/readdir.c b/fs/readdir.c
54966index 5e69ef5..e5d9099 100644
54967--- a/fs/readdir.c
54968+++ b/fs/readdir.c
54969@@ -17,6 +17,7 @@
54970 #include <linux/security.h>
54971 #include <linux/syscalls.h>
54972 #include <linux/unistd.h>
54973+#include <linux/namei.h>
54974
54975 #include <asm/uaccess.h>
54976
54977@@ -67,6 +68,7 @@ struct old_linux_dirent {
54978
54979 struct readdir_callback {
54980 struct old_linux_dirent __user * dirent;
54981+ struct file * file;
54982 int result;
54983 };
54984
54985@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
54986 buf->result = -EOVERFLOW;
54987 return -EOVERFLOW;
54988 }
54989+
54990+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
54991+ return 0;
54992+
54993 buf->result++;
54994 dirent = buf->dirent;
54995 if (!access_ok(VERIFY_WRITE, dirent,
54996@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
54997
54998 buf.result = 0;
54999 buf.dirent = dirent;
55000+ buf.file = f.file;
55001
55002 error = vfs_readdir(f.file, fillonedir, &buf);
55003 if (buf.result)
55004@@ -139,6 +146,7 @@ struct linux_dirent {
55005 struct getdents_callback {
55006 struct linux_dirent __user * current_dir;
55007 struct linux_dirent __user * previous;
55008+ struct file * file;
55009 int count;
55010 int error;
55011 };
55012@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
55013 buf->error = -EOVERFLOW;
55014 return -EOVERFLOW;
55015 }
55016+
55017+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55018+ return 0;
55019+
55020 dirent = buf->previous;
55021 if (dirent) {
55022 if (__put_user(offset, &dirent->d_off))
55023@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
55024 buf.previous = NULL;
55025 buf.count = count;
55026 buf.error = 0;
55027+ buf.file = f.file;
55028
55029 error = vfs_readdir(f.file, filldir, &buf);
55030 if (error >= 0)
55031@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
55032 struct getdents_callback64 {
55033 struct linux_dirent64 __user * current_dir;
55034 struct linux_dirent64 __user * previous;
55035+ struct file *file;
55036 int count;
55037 int error;
55038 };
55039@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
55040 buf->error = -EINVAL; /* only used if we fail.. */
55041 if (reclen > buf->count)
55042 return -EINVAL;
55043+
55044+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
55045+ return 0;
55046+
55047 dirent = buf->previous;
55048 if (dirent) {
55049 if (__put_user(offset, &dirent->d_off))
55050@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
55051
55052 buf.current_dir = dirent;
55053 buf.previous = NULL;
55054+ buf.file = f.file;
55055 buf.count = count;
55056 buf.error = 0;
55057
55058@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
55059 error = buf.error;
55060 lastdirent = buf.previous;
55061 if (lastdirent) {
55062- typeof(lastdirent->d_off) d_off = f.file->f_pos;
55063+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
55064 if (__put_user(d_off, &lastdirent->d_off))
55065 error = -EFAULT;
55066 else
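
Each getdents flavour above grows a struct file pointer in its callback context so the per-entry fill function can consult gr_acl_handle_filldir() and silently drop entries the caller may not see. A userspace model of a filldir-style callback that skips filtered names; entry_visible() is a hypothetical stand-in for the ACL check:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* The hook skips an entry by returning 0 without emitting anything;
     * directory iteration simply continues. */
    struct ctx { int emitted; };

    static bool entry_visible(const char *name)
    {
        return strcmp(name, "hidden") != 0;
    }

    static int filldir(struct ctx *c, const char *name)
    {
        if (!entry_visible(name))
            return 0;               /* filtered: nothing copied to userspace */
        printf("%s\n", name);
        c->emitted++;
        return 0;
    }

    int main(void)
    {
        struct ctx c = { 0 };
        const char *entries[] = { ".", "..", "hidden", "visible" };
        for (unsigned int i = 0; i < sizeof(entries) / sizeof(entries[0]); i++)
            filldir(&c, entries[i]);
        printf("emitted %d of 4\n", c.emitted);
        return 0;
    }
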
55067diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
55068index 2b7882b..1c5ef48 100644
55069--- a/fs/reiserfs/do_balan.c
55070+++ b/fs/reiserfs/do_balan.c
55071@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
55072 return;
55073 }
55074
55075- atomic_inc(&(fs_generation(tb->tb_sb)));
55076+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
55077 do_balance_starts(tb);
55078
55079 /* balance leaf returns 0 except if combining L R and S into
55080diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
55081index e60e870..f40ac16 100644
55082--- a/fs/reiserfs/procfs.c
55083+++ b/fs/reiserfs/procfs.c
55084@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
55085 "SMALL_TAILS " : "NO_TAILS ",
55086 replay_only(sb) ? "REPLAY_ONLY " : "",
55087 convert_reiserfs(sb) ? "CONV " : "",
55088- atomic_read(&r->s_generation_counter),
55089+ atomic_read_unchecked(&r->s_generation_counter),
55090 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
55091 SF(s_do_balance), SF(s_unneeded_left_neighbor),
55092 SF(s_good_search_by_key_reada), SF(s_bmaps),
55093diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
55094index 157e474..65a6114 100644
55095--- a/fs/reiserfs/reiserfs.h
55096+++ b/fs/reiserfs/reiserfs.h
55097@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
55098 /* Comment? -Hans */
55099 wait_queue_head_t s_wait;
55100 /* To be obsoleted soon by per buffer seals.. -Hans */
55101- atomic_t s_generation_counter; // increased by one every time the
55102+ atomic_unchecked_t s_generation_counter; // increased by one every time the
55103 // tree gets re-balanced
55104 unsigned long s_properties; /* File system properties. Currently holds
55105 on-disk FS format */
55106@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
55107 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
55108
55109 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
55110-#define get_generation(s) atomic_read (&fs_generation(s))
55111+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
55112 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
55113 #define __fs_changed(gen,s) (gen != get_generation (s))
55114 #define fs_changed(gen,s) \
55115diff --git a/fs/select.c b/fs/select.c
55116index 2ef72d9..f213b17 100644
55117--- a/fs/select.c
55118+++ b/fs/select.c
55119@@ -20,6 +20,7 @@
55120 #include <linux/export.h>
55121 #include <linux/slab.h>
55122 #include <linux/poll.h>
55123+#include <linux/security.h>
55124 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
55125 #include <linux/file.h>
55126 #include <linux/fdtable.h>
55127@@ -826,6 +827,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
55128 struct poll_list *walk = head;
55129 unsigned long todo = nfds;
55130
55131+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
55132 if (nfds > rlimit(RLIMIT_NOFILE))
55133 return -EINVAL;
55134
55135diff --git a/fs/seq_file.c b/fs/seq_file.c
55136index f2bc3df..239d4f6 100644
55137--- a/fs/seq_file.c
55138+++ b/fs/seq_file.c
55139@@ -10,6 +10,7 @@
55140 #include <linux/seq_file.h>
55141 #include <linux/slab.h>
55142 #include <linux/cred.h>
55143+#include <linux/sched.h>
55144
55145 #include <asm/uaccess.h>
55146 #include <asm/page.h>
55147@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
55148 #ifdef CONFIG_USER_NS
55149 p->user_ns = file->f_cred->user_ns;
55150 #endif
55151+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55152+ p->exec_id = current->exec_id;
55153+#endif
55154
55155 /*
55156 * Wrappers around seq_open(e.g. swaps_open) need to be
55157@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
55158 return 0;
55159 }
55160 if (!m->buf) {
55161- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
55162+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
55163 if (!m->buf)
55164 return -ENOMEM;
55165 }
55166@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
55167 Eoverflow:
55168 m->op->stop(m, p);
55169 kfree(m->buf);
55170- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
55171+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
55172 return !m->buf ? -ENOMEM : -EAGAIN;
55173 }
55174
55175@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
55176
55177 /* grab buffer if we didn't have one */
55178 if (!m->buf) {
55179- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
55180+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
55181 if (!m->buf)
55182 goto Enomem;
55183 }
55184@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
55185 goto Fill;
55186 m->op->stop(m, p);
55187 kfree(m->buf);
55188- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
55189+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
55190 if (!m->buf)
55191 goto Enomem;
55192 m->count = 0;
55193@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
55194 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
55195 void *data)
55196 {
55197- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
55198+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
55199 int res = -ENOMEM;
55200
55201 if (op) {
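
seq_open() now stamps the seq_file with the opener's exec_id; the /proc show functions patched earlier (stat, statm, mem, environ, maps, smaps, numa_maps) compare it against current->exec_id and return an empty read on mismatch, closing the hole where an fd opened before execve() keeps reading the post-exec image. A sketch of the handshake, with types and field names simplified:

    #include <stdio.h>

    struct seq_file { unsigned long exec_id; };
    struct task     { unsigned long exec_id; };

    /* A reader whose task has execve()d since open no longer matches. */
    static void seq_show(const struct seq_file *m, const struct task *cur)
    {
        if (cur->exec_id != m->exec_id) {
            fprintf(stderr, "badprocpid\n"); /* gr_log_badprocpid() */
            return;                          /* show nothing */
        }
        printf("real contents\n");
    }

    int main(void)
    {
        struct task t = { .exec_id = 7 };
        struct seq_file m = { .exec_id = t.exec_id }; /* stamped at open */
        seq_show(&m, &t);   /* same image: contents */
        t.exec_id++;        /* task re-executed */
        seq_show(&m, &t);   /* stale fd: empty */
        return 0;
    }
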
55202diff --git a/fs/splice.c b/fs/splice.c
55203index 6909d89..5b2e8f9 100644
55204--- a/fs/splice.c
55205+++ b/fs/splice.c
55206@@ -194,7 +194,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
55207 pipe_lock(pipe);
55208
55209 for (;;) {
55210- if (!pipe->readers) {
55211+ if (!atomic_read(&pipe->readers)) {
55212 send_sig(SIGPIPE, current, 0);
55213 if (!ret)
55214 ret = -EPIPE;
55215@@ -248,9 +248,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
55216 do_wakeup = 0;
55217 }
55218
55219- pipe->waiting_writers++;
55220+ atomic_inc(&pipe->waiting_writers);
55221 pipe_wait(pipe);
55222- pipe->waiting_writers--;
55223+ atomic_dec(&pipe->waiting_writers);
55224 }
55225
55226 pipe_unlock(pipe);
55227@@ -563,7 +563,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
55228 old_fs = get_fs();
55229 set_fs(get_ds());
55230 /* The cast to a user pointer is valid due to the set_fs() */
55231- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
55232+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
55233 set_fs(old_fs);
55234
55235 return res;
55236@@ -578,7 +578,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
55237 old_fs = get_fs();
55238 set_fs(get_ds());
55239 /* The cast to a user pointer is valid due to the set_fs() */
55240- res = vfs_write(file, (const char __user *)buf, count, &pos);
55241+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
55242 set_fs(old_fs);
55243
55244 return res;
55245@@ -630,7 +630,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
55246 goto err;
55247
55248 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
55249- vec[i].iov_base = (void __user *) page_address(page);
55250+ vec[i].iov_base = (void __force_user *) page_address(page);
55251 vec[i].iov_len = this_len;
55252 spd.pages[i] = page;
55253 spd.nr_pages++;
55254@@ -851,10 +851,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
55255 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
55256 {
55257 while (!pipe->nrbufs) {
55258- if (!pipe->writers)
55259+ if (!atomic_read(&pipe->writers))
55260 return 0;
55261
55262- if (!pipe->waiting_writers && sd->num_spliced)
55263+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
55264 return 0;
55265
55266 if (sd->flags & SPLICE_F_NONBLOCK)
55267@@ -1189,7 +1189,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
55268 * out of the pipe right after the splice_to_pipe(). So set
55269 * PIPE_READERS appropriately.
55270 */
55271- pipe->readers = 1;
55272+ atomic_set(&pipe->readers, 1);
55273
55274 current->splice_pipe = pipe;
55275 }
55276@@ -1738,9 +1738,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55277 ret = -ERESTARTSYS;
55278 break;
55279 }
55280- if (!pipe->writers)
55281+ if (!atomic_read(&pipe->writers))
55282 break;
55283- if (!pipe->waiting_writers) {
55284+ if (!atomic_read(&pipe->waiting_writers)) {
55285 if (flags & SPLICE_F_NONBLOCK) {
55286 ret = -EAGAIN;
55287 break;
55288@@ -1772,7 +1772,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55289 pipe_lock(pipe);
55290
55291 while (pipe->nrbufs >= pipe->buffers) {
55292- if (!pipe->readers) {
55293+ if (!atomic_read(&pipe->readers)) {
55294 send_sig(SIGPIPE, current, 0);
55295 ret = -EPIPE;
55296 break;
55297@@ -1785,9 +1785,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
55298 ret = -ERESTARTSYS;
55299 break;
55300 }
55301- pipe->waiting_writers++;
55302+ atomic_inc(&pipe->waiting_writers);
55303 pipe_wait(pipe);
55304- pipe->waiting_writers--;
55305+ atomic_dec(&pipe->waiting_writers);
55306 }
55307
55308 pipe_unlock(pipe);
55309@@ -1823,14 +1823,14 @@ retry:
55310 pipe_double_lock(ipipe, opipe);
55311
55312 do {
55313- if (!opipe->readers) {
55314+ if (!atomic_read(&opipe->readers)) {
55315 send_sig(SIGPIPE, current, 0);
55316 if (!ret)
55317 ret = -EPIPE;
55318 break;
55319 }
55320
55321- if (!ipipe->nrbufs && !ipipe->writers)
55322+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
55323 break;
55324
55325 /*
55326@@ -1927,7 +1927,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
55327 pipe_double_lock(ipipe, opipe);
55328
55329 do {
55330- if (!opipe->readers) {
55331+ if (!atomic_read(&opipe->readers)) {
55332 send_sig(SIGPIPE, current, 0);
55333 if (!ret)
55334 ret = -EPIPE;
55335@@ -1972,7 +1972,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
55336 * return EAGAIN if we have the potential of some data in the
55337 * future, otherwise just return 0
55338 */
55339- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
55340+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
55341 ret = -EAGAIN;
55342
55343 pipe_unlock(ipipe);
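The splice.c hunks above are one site of a tree-wide grsecurity change that converts the pipe reader/writer bookkeeping fields from plain integers to atomic_t; the matching struct pipe_inode_info and fs/pipe.c changes appear elsewhere in this patch. A minimal sketch of the pattern (the struct here is illustrative, not the kernel's):

#include <linux/atomic.h>

/* Illustrative stand-in for the fields changed in struct pipe_inode_info. */
struct pipe_counters {
	atomic_t readers;		/* was: unsigned int readers; */
	atomic_t writers;		/* was: unsigned int writers; */
	atomic_t waiting_writers;	/* was: unsigned int waiting_writers; */
};

static void mark_waiting_writer(struct pipe_counters *p)
{
	/* "p->waiting_writers++" becomes an atomic read-modify-write, so
	 * the counter cannot be corrupted by a racing update and, with
	 * PAX_REFCOUNT enabled, cannot silently overflow. */
	atomic_inc(&p->waiting_writers);
	/* ... sleep in pipe_wait() ... */
	atomic_dec(&p->waiting_writers);
}

Every read site correspondingly becomes atomic_read(), as in the hunks above.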
55344diff --git a/fs/stat.c b/fs/stat.c
55345index 14f4545..9b7f55b 100644
55346--- a/fs/stat.c
55347+++ b/fs/stat.c
55348@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
55349 stat->gid = inode->i_gid;
55350 stat->rdev = inode->i_rdev;
55351 stat->size = i_size_read(inode);
55352- stat->atime = inode->i_atime;
55353- stat->mtime = inode->i_mtime;
55354+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
55355+ stat->atime = inode->i_ctime;
55356+ stat->mtime = inode->i_ctime;
55357+ } else {
55358+ stat->atime = inode->i_atime;
55359+ stat->mtime = inode->i_mtime;
55360+ }
55361 stat->ctime = inode->i_ctime;
55362 stat->blksize = (1 << inode->i_blkbits);
55363 stat->blocks = inode->i_blocks;
55364@@ -46,8 +51,14 @@ int vfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
55365 if (retval)
55366 return retval;
55367
55368- if (inode->i_op->getattr)
55369- return inode->i_op->getattr(mnt, dentry, stat);
55370+ if (inode->i_op->getattr) {
55371+ retval = inode->i_op->getattr(mnt, dentry, stat);
55372+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
55373+ stat->atime = stat->ctime;
55374+ stat->mtime = stat->ctime;
55375+ }
55376+ return retval;
55377+ }
55378
55379 generic_fillattr(inode, stat);
55380 return 0;
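These fs/stat.c hunks implement the stat half of GRKERNSEC_DEVICE_SIDECHANNEL (described in the Kconfig section this patch adds below): for a process without CAP_MKNOD, the reported access and modify times of a sidechannel-prone device are pinned to its change time. The is_sidechannel_device() helper is defined in the include/linux/fs.h portion of the patch; a sketch consistent with how it is used here:

/* Sketch: world-accessible character or block devices are treated as
 * sidechannel-sensitive (e.g. /dev/ptmx). */
static inline bool is_sidechannel_device(const struct inode *inode)
{
#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
	umode_t mode = inode->i_mode;

	return ((S_ISCHR(mode) || S_ISBLK(mode)) &&
		(mode & (S_IROTH | S_IWOTH)));
#else
	return false;
#endif
}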
55381diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
55382index 2fbdff6..5530a61 100644
55383--- a/fs/sysfs/dir.c
55384+++ b/fs/sysfs/dir.c
55385@@ -685,6 +685,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
55386 struct sysfs_dirent *sd;
55387 int rc;
55388
55389+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
55390+ const char *parent_name = parent_sd->s_name;
55391+
55392+ mode = S_IFDIR | S_IRWXU;
55393+
55394+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
55395+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
55396+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
55397+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
55398+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
55399+#endif
55400+
55401 /* allocate */
55402 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
55403 if (!sd)
55404diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
55405index 602f56d..6853db8 100644
55406--- a/fs/sysfs/file.c
55407+++ b/fs/sysfs/file.c
55408@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
55409
55410 struct sysfs_open_dirent {
55411 atomic_t refcnt;
55412- atomic_t event;
55413+ atomic_unchecked_t event;
55414 wait_queue_head_t poll;
55415 struct list_head buffers; /* goes through sysfs_buffer.list */
55416 };
55417@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
55418 if (!sysfs_get_active(attr_sd))
55419 return -ENODEV;
55420
55421- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
55422+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
55423 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
55424
55425 sysfs_put_active(attr_sd);
55426@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
55427 return -ENOMEM;
55428
55429 atomic_set(&new_od->refcnt, 0);
55430- atomic_set(&new_od->event, 1);
55431+ atomic_set_unchecked(&new_od->event, 1);
55432 init_waitqueue_head(&new_od->poll);
55433 INIT_LIST_HEAD(&new_od->buffers);
55434 goto retry;
55435@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
55436
55437 sysfs_put_active(attr_sd);
55438
55439- if (buffer->event != atomic_read(&od->event))
55440+ if (buffer->event != atomic_read_unchecked(&od->event))
55441 goto trigger;
55442
55443 return DEFAULT_POLLMASK;
55444@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
55445
55446 od = sd->s_attr.open;
55447 if (od) {
55448- atomic_inc(&od->event);
55449+ atomic_inc_unchecked(&od->event);
55450 wake_up_interruptible(&od->poll);
55451 }
55452
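The sysfs event counter above is deliberately moved to atomic_unchecked_t rather than left as atomic_t. Under PaX's PAX_REFCOUNT feature, ordinary atomic_t increments trap on overflow to stop reference-count-overflow exploits; counters that may legitimately wrap, like this poll-event sequence number, use the _unchecked variants, which keep the atomicity but opt out of the overflow trap. With PAX_REFCOUNT disabled the two types behave identically; a sketch of the fallback definitions (assumed to mirror the patch's atomic header changes):

/* Sketch: the unchecked type is just an atomic_t without overflow
 * detection; the exact definitions live in the patch's atomic headers. */
typedef struct {
	int counter;
} atomic_unchecked_t;

#define atomic_read_unchecked(v)	(*(volatile int *)&(v)->counter)
#define atomic_set_unchecked(v, i)	(((v)->counter) = (i))
/* atomic_inc_unchecked() is the arch increment minus the overflow check */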
55453diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
55454index 3c9eb56..9dea5be 100644
55455--- a/fs/sysfs/symlink.c
55456+++ b/fs/sysfs/symlink.c
55457@@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
55458
55459 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
55460 {
55461- char *page = nd_get_link(nd);
55462+ const char *page = nd_get_link(nd);
55463 if (!IS_ERR(page))
55464 free_page((unsigned long)page);
55465 }
55466diff --git a/fs/udf/misc.c b/fs/udf/misc.c
55467index c175b4d..8f36a16 100644
55468--- a/fs/udf/misc.c
55469+++ b/fs/udf/misc.c
55470@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
55471
55472 u8 udf_tag_checksum(const struct tag *t)
55473 {
55474- u8 *data = (u8 *)t;
55475+ const u8 *data = (const u8 *)t;
55476 u8 checksum = 0;
55477 int i;
55478 for (i = 0; i < sizeof(struct tag); ++i)
55479diff --git a/fs/utimes.c b/fs/utimes.c
55480index f4fb7ec..3fe03c0 100644
55481--- a/fs/utimes.c
55482+++ b/fs/utimes.c
55483@@ -1,6 +1,7 @@
55484 #include <linux/compiler.h>
55485 #include <linux/file.h>
55486 #include <linux/fs.h>
55487+#include <linux/security.h>
55488 #include <linux/linkage.h>
55489 #include <linux/mount.h>
55490 #include <linux/namei.h>
55491@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
55492 goto mnt_drop_write_and_out;
55493 }
55494 }
55495+
55496+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
55497+ error = -EACCES;
55498+ goto mnt_drop_write_and_out;
55499+ }
55500+
55501 mutex_lock(&inode->i_mutex);
55502 error = notify_change(path->dentry, &newattrs);
55503 mutex_unlock(&inode->i_mutex);
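gr_acl_handle_utime() is one of the RBAC hook functions added under grsecurity/ later in this patch. The hooks follow a common convention: they return the granted access mask, so a return value of 0 means the RBAC policy denies the operation and the caller maps it to -EACCES, as above. Assumed shape of the declaration (the real one is in the grsecurity headers this patch adds):

/* Assumption: non-zero return grants access, 0 denies. */
__u32 gr_acl_handle_utime(const struct dentry *dentry,
			  const struct vfsmount *mnt);

Note that the hook takes both the dentry and the vfsmount: RBAC policy is matched against full pathnames, which cannot be reconstructed from a dentry alone.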
55504diff --git a/fs/xattr.c b/fs/xattr.c
55505index 3377dff..4feded6 100644
55506--- a/fs/xattr.c
55507+++ b/fs/xattr.c
55508@@ -319,7 +319,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
55509 * Extended attribute SET operations
55510 */
55511 static long
55512-setxattr(struct dentry *d, const char __user *name, const void __user *value,
55513+setxattr(struct path *path, const char __user *name, const void __user *value,
55514 size_t size, int flags)
55515 {
55516 int error;
55517@@ -355,7 +355,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
55518 posix_acl_fix_xattr_from_user(kvalue, size);
55519 }
55520
55521- error = vfs_setxattr(d, kname, kvalue, size, flags);
55522+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
55523+ error = -EACCES;
55524+ goto out;
55525+ }
55526+
55527+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
55528 out:
55529 if (vvalue)
55530 vfree(vvalue);
55531@@ -377,7 +382,7 @@ retry:
55532 return error;
55533 error = mnt_want_write(path.mnt);
55534 if (!error) {
55535- error = setxattr(path.dentry, name, value, size, flags);
55536+ error = setxattr(&path, name, value, size, flags);
55537 mnt_drop_write(path.mnt);
55538 }
55539 path_put(&path);
55540@@ -401,7 +406,7 @@ retry:
55541 return error;
55542 error = mnt_want_write(path.mnt);
55543 if (!error) {
55544- error = setxattr(path.dentry, name, value, size, flags);
55545+ error = setxattr(&path, name, value, size, flags);
55546 mnt_drop_write(path.mnt);
55547 }
55548 path_put(&path);
55549@@ -416,16 +421,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
55550 const void __user *,value, size_t, size, int, flags)
55551 {
55552 struct fd f = fdget(fd);
55553- struct dentry *dentry;
55554 int error = -EBADF;
55555
55556 if (!f.file)
55557 return error;
55558- dentry = f.file->f_path.dentry;
55559- audit_inode(NULL, dentry, 0);
55560+ audit_inode(NULL, f.file->f_path.dentry, 0);
55561 error = mnt_want_write_file(f.file);
55562 if (!error) {
55563- error = setxattr(dentry, name, value, size, flags);
55564+ error = setxattr(&f.file->f_path, name, value, size, flags);
55565 mnt_drop_write_file(f.file);
55566 }
55567 fdput(f);
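The setxattr() refactor above exists for the same reason: the gr_acl_handle_setxattr() hook needs a (vfsmount, dentry) pair, so the helper's signature is widened from a bare dentry to a struct path, which carries exactly that pair:

/* include/linux/path.h (existing kernel definition, shown for context) */
struct path {
	struct vfsmount *mnt;	/* the mount the object is reached through */
	struct dentry *dentry;	/* the object itself */
};

Passing &f.file->f_path in the fsetxattr() case also removes the need for the local dentry variable.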
55568diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
55569index 9fbea87..6b19972 100644
55570--- a/fs/xattr_acl.c
55571+++ b/fs/xattr_acl.c
55572@@ -76,8 +76,8 @@ struct posix_acl *
55573 posix_acl_from_xattr(struct user_namespace *user_ns,
55574 const void *value, size_t size)
55575 {
55576- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
55577- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
55578+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
55579+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
55580 int count;
55581 struct posix_acl *acl;
55582 struct posix_acl_entry *acl_e;
55583diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
55584index 572a858..12a9b0d 100644
55585--- a/fs/xfs/xfs_bmap.c
55586+++ b/fs/xfs/xfs_bmap.c
55587@@ -192,7 +192,7 @@ xfs_bmap_validate_ret(
55588 int nmap,
55589 int ret_nmap);
55590 #else
55591-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
55592+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
55593 #endif /* DEBUG */
55594
55595 STATIC int
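Redefining the disabled debug macro as do {} while (0) instead of an empty expansion is a standard cleanliness fix: the macro then expands to a single real statement, so call sites must be terminated with a semicolon and behave like function calls in every context. A compact illustration:

#define validate_empty(x)			/* old style: expands to nothing */
#define validate_stmt(x)  do {} while (0)	/* new style: a real statement */

void example(int debug)
{
	if (debug)
		validate_stmt(1);	/* single statement; ';' required */
	/* "validate_empty(1)" with a forgotten ';' would still compile,
	 * silently gluing onto the next statement; the do/while form
	 * turns that mistake into a compile error. */
}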
55596diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
55597index 1b9fc3e..e1bdde0 100644
55598--- a/fs/xfs/xfs_dir2_sf.c
55599+++ b/fs/xfs/xfs_dir2_sf.c
55600@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
55601 }
55602
55603 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
55604- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
55605+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
55606+ char name[sfep->namelen];
55607+ memcpy(name, sfep->name, sfep->namelen);
55608+ if (filldir(dirent, name, sfep->namelen,
55609+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
55610+ *offset = off & 0x7fffffff;
55611+ return 0;
55612+ }
55613+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
55614 off & 0x7fffffff, ino, DT_UNKNOWN)) {
55615 *offset = off & 0x7fffffff;
55616 return 0;
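In the shortform-directory case above, sfep->name can point into the inode's inline data (if_u2.if_inline_data), that is, into the middle of the xfs inode object, and filldir() eventually copies that name to userspace. The reading here (an assumption, not stated in the patch) is that PAX_USERCOPY would reject a user copy sourced from inside a non-whitelisted kernel object, so the name is first bounced through an on-stack buffer, which is always a permitted copy source. The bounce itself, as in the hunk:

/* Sketch of the bounce pattern: copy the bytes into a plain local
 * object before they reach a copy_to_user() path. */
char name[sfep->namelen];		/* small, bounded VLA */
memcpy(name, sfep->name, sfep->namelen);
/* name[] now satisfies hardened-usercopy object checks */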
55617diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
55618index c1c3ef8..0952438 100644
55619--- a/fs/xfs/xfs_ioctl.c
55620+++ b/fs/xfs/xfs_ioctl.c
55621@@ -127,7 +127,7 @@ xfs_find_handle(
55622 }
55623
55624 error = -EFAULT;
55625- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
55626+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
55627 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
55628 goto out_put;
55629
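The added hsize > sizeof handle test bounds a copy_to_user() whose length is computed from the parsed handle request; without it, an oversized hsize would copy adjacent kernel stack back to userspace. The general defensive pattern:

#include <linux/uaccess.h>

/* Sketch: reject any caller-derived length larger than the object we
 * actually copy from, before letting copy_to_user() see it. */
static long copy_bounded(void __user *uptr, const void *obj,
			 size_t objsize, size_t len)
{
	if (len > objsize)		/* len is untrusted input */
		return -EFAULT;
	return copy_to_user(uptr, obj, len) ? -EFAULT : 0;
}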
55630diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
55631index d82efaa..0904a8e 100644
55632--- a/fs/xfs/xfs_iops.c
55633+++ b/fs/xfs/xfs_iops.c
55634@@ -395,7 +395,7 @@ xfs_vn_put_link(
55635 struct nameidata *nd,
55636 void *p)
55637 {
55638- char *s = nd_get_link(nd);
55639+ const char *s = nd_get_link(nd);
55640
55641 if (!IS_ERR(s))
55642 kfree(s);
55643diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
55644new file mode 100644
55645index 0000000..92247e4
55646--- /dev/null
55647+++ b/grsecurity/Kconfig
55648@@ -0,0 +1,1021 @@
55649+#
55650+# grecurity configuration
55651+# grsecurity configuration
55652+menu "Memory Protections"
55653+depends on GRKERNSEC
55654+
55655+config GRKERNSEC_KMEM
55656+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
55657+ default y if GRKERNSEC_CONFIG_AUTO
55658+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55659+ help
55660+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55661+ be written to or read from to modify or leak the contents of the running
55662+ kernel. /dev/port will also not be allowed to be opened and support
55663+ for /dev/cpu/*/msr will be removed. If you have module
55664+ support disabled, enabling this will close up five ways that are
55665+ currently used to insert malicious code into the running kernel.
55666+
55667+ Even with all these features enabled, we still highly recommend that
55668+ you use the RBAC system, as it is still possible for an attacker to
55669+ modify the running kernel through privileged I/O granted by ioperm/iopl.
55670+
55671+ If you are not using XFree86, you may be able to stop this additional
55672+ case by enabling the 'Disable privileged I/O' option. Though nothing
55673+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55674+ but only to video memory, which is the only writing we allow in this
55675+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
55676+ not be allowed to mprotect it with PROT_WRITE later.
55677+ Enabling this feature will prevent the "cpupower" and "powertop" tools
55678+ from working.
55679+
55680+ It is highly recommended that you say Y here if you meet all the
55681+ conditions above.
55682+
55683+config GRKERNSEC_VM86
55684+ bool "Restrict VM86 mode"
55685+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
55686+ depends on X86_32
55687+
55688+ help
55689+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55690+ make use of a special execution mode on 32bit x86 processors called
55691+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55692+ video cards and will still work with this option enabled. The purpose
55693+ of the option is to prevent exploitation of emulation errors in
55694+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
55695+ Nearly all users should be able to enable this option.
55696+
55697+config GRKERNSEC_IO
55698+ bool "Disable privileged I/O"
55699+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
55700+ depends on X86
55701+ select RTC_CLASS
55702+ select RTC_INTF_DEV
55703+ select RTC_DRV_CMOS
55704+
55705+ help
55706+ If you say Y here, all ioperm and iopl calls will return an error.
55707+ Ioperm and iopl can be used to modify the running kernel.
55708+ Unfortunately, some programs need this access to operate properly,
55709+ the most notable of which are XFree86 and hwclock. hwclock can be
55710+ remedied by having RTC support in the kernel, so real-time
55711+ clock support is enabled if this option is enabled, to ensure
55712+ that hwclock operates correctly. XFree86 still will not
55713+ operate correctly with this option enabled, so DO NOT CHOOSE Y
55714+ IF YOU USE XFree86. If you use XFree86 and you still want to
55715+ protect your kernel against modification, use the RBAC system.
55716+
55717+config GRKERNSEC_JIT_HARDEN
55718+ bool "Harden BPF JIT against spray attacks"
55719+ default y if GRKERNSEC_CONFIG_AUTO
55720+ depends on BPF_JIT
55721+ help
55722+ If you say Y here, the native code generated by the kernel's Berkeley
55723+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
55724+ attacks that attempt to fit attacker-beneficial instructions in
55725+ 32bit immediate fields of JIT-generated native instructions. The
55726+ attacker will generally aim to cause an unintended instruction sequence
55727+ of JIT-generated native code to execute by jumping into the middle of
55728+ a generated instruction. This feature effectively randomizes the 32bit
55729+ immediate constants present in the generated code to thwart such attacks.
55730+
55731+ If you're using KERNEXEC, it's recommended that you enable this option
55732+ to supplement the hardening of the kernel.
55733+
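The "randomizes the 32bit immediate constants" wording above describes constant blinding: an attacker-chosen constant K never appears literally in the executable JIT output. Instead the JIT emits K ^ key followed by an XOR with key, so the bytes under attacker control are never directly present as instruction operands. A hedged pseudo-emitter sketch (struct jit_ctx, emit_mov_imm() and emit_xor_imm() are illustrative stand-ins for the real BPF JIT emitters):

struct jit_ctx;					/* illustrative JIT state */
void emit_mov_imm(struct jit_ctx *ctx, int reg, u32 imm);
void emit_xor_imm(struct jit_ctx *ctx, int reg, u32 imm);

static void emit_blinded_imm(struct jit_ctx *ctx, int reg, u32 k)
{
	u32 key = random32();		/* fresh key per emitted constant */

	emit_mov_imm(ctx, reg, k ^ key);	/* code bytes hold K ^ key */
	emit_xor_imm(ctx, reg, key);		/* runtime recovers K */
}

Jumping into the middle of such an instruction now yields key-dependent garbage rather than an attacker-staged gadget.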
55734+config GRKERNSEC_RAND_THREADSTACK
55735+ bool "Insert random gaps between thread stacks"
55736+ default y if GRKERNSEC_CONFIG_AUTO
55737+ depends on PAX_RANDMMAP && !PPC
55738+ help
55739+ If you say Y here, a random-sized gap will be enforced between allocated
55740+ thread stacks. Glibc's NPTL and other threading libraries that
55741+ pass MAP_STACK to the kernel for thread stack allocation are supported.
55742+ The implementation currently provides 8 bits of entropy for the gap.
55743+
55744+ Many distributions do not compile threaded remote services with the
55745+ -fstack-check argument to GCC, causing the variable-sized stack-based
55746+ allocator, alloca(), to not probe the stack on allocation. This
55747+ permits an unbounded alloca() to skip over any guard page and potentially
55748+ modify another thread's stack reliably. An enforced random gap
55749+ reduces the reliability of such an attack and increases the chance
55750+ that such a read/write to another thread's stack instead lands in
55751+ an unmapped area, causing a crash and triggering grsecurity's
55752+ anti-bruteforcing logic.
55753+
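Eight bits of entropy for the gap means one of 256 page-granular offsets is inserted ahead of each MAP_STACK allocation. A sketch of the arithmetic (the real logic sits in the mmap layout code this patch modifies; the helper name is an assumption):

/* Sketch: with 4 KiB pages this yields a random gap of 0..255 pages,
 * i.e. up to ~1 MiB, between neighboring thread stacks. */
static unsigned long rand_threadstack_gap(void)
{
	return (get_random_int() & 0xFFUL) << PAGE_SHIFT;
}

An alloca()-based overflow that previously landed reliably in the adjacent thread's stack must now guess the gap, and a wrong guess hits unmapped memory and crashes into GRKERNSEC_BRUTE's anti-bruteforcing logic.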
55754+config GRKERNSEC_PROC_MEMMAP
55755+ bool "Harden ASLR against information leaks and entropy reduction"
55756+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
55757+ depends on PAX_NOEXEC || PAX_ASLR
55758+ help
55759+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55760+ give no information about the addresses of its mappings if
55761+ PaX features that rely on random addresses are enabled on the task.
55762+ In addition to sanitizing this information and disabling other
55763+ dangerous sources of information, this option denies reads of sensitive
55764+ /proc/<pid> entries when the file descriptor was opened in a different
55765+ task than the one performing the read; such attempts are logged.
55766+ This option also limits argv/env strings for suid/sgid binaries
55767+ to 512KB to prevent a complete exhaustion of the stack entropy provided
55768+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
55769+ binaries to prevent alternative mmap layouts from being abused.
55770+
55771+ If you use PaX it is essential that you say Y here as it closes up
55772+ several holes that make full ASLR useless locally.
55773+
55774+config GRKERNSEC_BRUTE
55775+ bool "Deter exploit bruteforcing"
55776+ default y if GRKERNSEC_CONFIG_AUTO
55777+ help
55778+ If you say Y here, attempts to bruteforce exploits against forking
55779+ daemons such as apache or sshd, as well as against suid/sgid binaries
55780+ will be deterred. When a child of a forking daemon is killed by PaX
55781+ or crashes due to an illegal instruction or other suspicious signal,
55782+ the parent process will be delayed 30 seconds upon every subsequent
55783+ fork until the administrator is able to assess the situation and
55784+ restart the daemon.
55785+ In the suid/sgid case, the attempt is logged, the user has all their
55786+ processes terminated, and they are prevented from executing any further
55787+ processes for 15 minutes.
55788+ It is recommended that you also enable signal logging in the auditing
55789+ section so that logs are generated when a process triggers a suspicious
55790+ signal.
55791+ If the sysctl option is enabled, a sysctl option with name
55792+ "deter_bruteforce" is created.
55793+
55794+
55795+config GRKERNSEC_MODHARDEN
55796+ bool "Harden module auto-loading"
55797+ default y if GRKERNSEC_CONFIG_AUTO
55798+ depends on MODULES
55799+ help
55800+ If you say Y here, module auto-loading in response to use of some
55801+ feature implemented by an unloaded module will be restricted to
55802+ root users. Enabling this option helps defend against attacks
55803+ by unprivileged users who abuse the auto-loading behavior to
55804+ cause a vulnerable module to load that is then exploited.
55805+
55806+ If this option prevents a legitimate use of auto-loading for a
55807+ non-root user, the administrator can execute modprobe manually
55808+ with the exact name of the module mentioned in the alert log.
55809+ Alternatively, the administrator can add the module to the list
55810+ of modules loaded at boot by modifying init scripts.
55811+
55812+ Modification of init scripts will most likely be needed on
55813+ Ubuntu servers with encrypted home directory support enabled,
55814+ as the first non-root user logging in will cause the ecb(aes),
55815+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55816+
55817+config GRKERNSEC_HIDESYM
55818+ bool "Hide kernel symbols"
55819+ default y if GRKERNSEC_CONFIG_AUTO
55820+ select PAX_USERCOPY_SLABS
55821+ help
55822+ If you say Y here, getting information on loaded modules, and
55823+ displaying all kernel symbols through a syscall will be restricted
55824+ to users with CAP_SYS_MODULE. For software compatibility reasons,
55825+ /proc/kallsyms will be restricted to the root user. The RBAC
55826+ system can hide that entry even from root.
55827+
55828+ This option also prevents leaking of kernel addresses through
55829+ several /proc entries.
55830+
55831+ Note that this option is only effective provided the following
55832+ conditions are met:
55833+ 1) The kernel using grsecurity is not precompiled by some distribution
55834+ 2) You have also enabled GRKERNSEC_DMESG
55835+ 3) You are using the RBAC system and hiding other files such as your
55836+ kernel image and System.map. Alternatively, enabling this option
55837+ causes the permissions on /boot, /lib/modules, and the kernel
55838+ source directory to change at compile time to prevent
55839+ reading by non-root users.
55840+ If the above conditions are met, this option will aid in providing a
55841+ useful protection against local kernel exploitation of overflows
55842+ and arbitrary read/write vulnerabilities.
55843+
55844+config GRKERNSEC_KERN_LOCKOUT
55845+ bool "Active kernel exploit response"
55846+ default y if GRKERNSEC_CONFIG_AUTO
55847+ depends on X86 || ARM || PPC || SPARC
55848+ help
55849+ If you say Y here, when a PaX alert is triggered due to suspicious
55850+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55851+ or an OOPS occurs due to bad memory accesses, instead of just
55852+ terminating the offending process (and potentially allowing
55853+ a subsequent exploit from the same user), we will take one of two
55854+ actions:
55855+ If the user was root, we will panic the system.
55856+ If the user was non-root, we will log the attempt, terminate
55857+ all processes owned by the user, then prevent them from creating
55858+ any new processes until the system is restarted.
55859+ This deters repeated kernel exploitation/bruteforcing attempts
55860+ and is useful for later forensics.
55861+
55862+endmenu
55863+menu "Role Based Access Control Options"
55864+depends on GRKERNSEC
55865+
55866+config GRKERNSEC_RBAC_DEBUG
55867+ bool
55868+
55869+config GRKERNSEC_NO_RBAC
55870+ bool "Disable RBAC system"
55871+ help
55872+ If you say Y here, the /dev/grsec device will be removed from the kernel,
55873+ preventing the RBAC system from being enabled. You should only say Y
55874+ here if you have no intention of using the RBAC system, so as to prevent
55875+ an attacker with root access from misusing the RBAC system to hide files
55876+ and processes when loadable module support and /dev/[k]mem have been
55877+ locked down.
55878+
55879+config GRKERNSEC_ACL_HIDEKERN
55880+ bool "Hide kernel processes"
55881+ help
55882+ If you say Y here, all kernel threads will be hidden to all
55883+ processes but those whose subject has the "view hidden processes"
55884+ flag.
55885+
55886+config GRKERNSEC_ACL_MAXTRIES
55887+ int "Maximum tries before password lockout"
55888+ default 3
55889+ help
55890+ This option enforces the maximum number of times a user can attempt
55891+ to authorize themselves with the grsecurity RBAC system before being
55892+ denied the ability to attempt authorization again for a specified time.
55893+ The lower the number, the harder it will be to brute-force a password.
55894+
55895+config GRKERNSEC_ACL_TIMEOUT
55896+ int "Time to wait after max password tries, in seconds"
55897+ default 30
55898+ help
55899+ This option specifies the time the user must wait after attempting to
55900+ authorize to the RBAC system with the maximum number of invalid
55901+ passwords. The higher the number, the harder it will be to brute-force
55902+ a password.
55903+
55904+endmenu
55905+menu "Filesystem Protections"
55906+depends on GRKERNSEC
55907+
55908+config GRKERNSEC_PROC
55909+ bool "Proc restrictions"
55910+ default y if GRKERNSEC_CONFIG_AUTO
55911+ help
55912+ If you say Y here, the permissions of the /proc filesystem
55913+ will be altered to enhance system security and privacy. You MUST
55914+ choose either a user only restriction or a user and group restriction.
55915+ If you choose the user-only restriction, users will be able to see
55916+ only the processes they themselves run; if you choose the group
55917+ restriction, members of a group you select can view all processes and
55918+ files normally restricted to root. NOTE: If you're running identd or
55919+ ntpd as a non-root user, you will have to run it as the group you
55920+ specify here.
55921+
55922+config GRKERNSEC_PROC_USER
55923+ bool "Restrict /proc to user only"
55924+ depends on GRKERNSEC_PROC
55925+ help
55926+ If you say Y here, non-root users will only be able to view their own
55927+ processes, and will be restricted from viewing network-related
55928+ information and kernel symbol and module information.
55929+
55930+config GRKERNSEC_PROC_USERGROUP
55931+ bool "Allow special group"
55932+ default y if GRKERNSEC_CONFIG_AUTO
55933+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55934+ help
55935+ If you say Y here, you will be able to select a group that will be
55936+ able to view all processes and network-related information. If you've
55937+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55938+ remain hidden. This option is useful if you want to run identd as
55939+ a non-root user. The group you select may also be chosen at boot time
55940+ via "grsec_proc_gid=" on the kernel commandline.
55941+
55942+config GRKERNSEC_PROC_GID
55943+ int "GID for special group"
55944+ depends on GRKERNSEC_PROC_USERGROUP
55945+ default 1001
55946+
55947+config GRKERNSEC_PROC_ADD
55948+ bool "Additional restrictions"
55949+ default y if GRKERNSEC_CONFIG_AUTO
55950+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55951+ help
55952+ If you say Y here, additional restrictions will be placed on
55953+ /proc that keep normal users from viewing device information and
55954+ slabinfo information that could be useful for exploits.
55955+
55956+config GRKERNSEC_LINK
55957+ bool "Linking restrictions"
55958+ default y if GRKERNSEC_CONFIG_AUTO
55959+ help
55960+ If you say Y here, /tmp race exploits will be prevented, since users
55961+ will no longer be able to follow symlinks owned by other users in
55962+ world-writable +t directories (e.g. /tmp), unless the owner of the
55963+ symlink is the owner of the directory. Users will also not be
55964+ able to hardlink to files they do not own. If the sysctl option is
55965+ enabled, a sysctl option with name "linking_restrictions" is created.
55966+
55967+config GRKERNSEC_SYMLINKOWN
55968+ bool "Kernel-enforced SymlinksIfOwnerMatch"
55969+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
55970+ help
55971+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
55972+ that prevents it from being used as a security feature. As Apache
55973+ verifies the symlink by performing a stat() against the target of
55974+ the symlink before it is followed, an attacker can set up a symlink
55975+ to point to a same-owned file, then replace the symlink with one
55976+ that targets another user's file just after Apache "validates" the
55977+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
55978+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
55979+ will be in place for the group you specify. If the sysctl option
55980+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
55981+ created.
55982+
55983+config GRKERNSEC_SYMLINKOWN_GID
55984+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
55985+ depends on GRKERNSEC_SYMLINKOWN
55986+ default 1006
55987+ help
55988+ Setting this GID determines what group kernel-enforced
55989+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
55990+ is enabled, a sysctl option with name "symlinkown_gid" is created.
55991+
55992+config GRKERNSEC_FIFO
55993+ bool "FIFO restrictions"
55994+ default y if GRKERNSEC_CONFIG_AUTO
55995+ help
55996+ If you say Y here, users will not be able to write to FIFOs they don't
55997+ own in world-writable +t directories (e.g. /tmp), unless the owner of
55998+ the FIFO is the same owner of the directory it's held in. If the sysctl
55999+ option is enabled, a sysctl option with name "fifo_restrictions" is
56000+ created.
56001+
56002+config GRKERNSEC_SYSFS_RESTRICT
56003+ bool "Sysfs/debugfs restriction"
56004+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
56005+ depends on SYSFS
56006+ help
56007+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
56008+ any filesystem normally mounted under it (e.g. debugfs) will be
56009+ mostly accessible only by root. These filesystems generally provide access
56010+ to hardware and debug information that isn't appropriate for unprivileged
56011+ users of the system. Sysfs and debugfs have also become a large source
56012+ of new vulnerabilities, ranging from infoleaks to local compromise.
56013+ There has been very little oversight with an eye toward security involved
56014+ in adding new exporters of information to these filesystems, so their
56015+ use is discouraged.
56016+ For reasons of compatibility, a few directories have been whitelisted
56017+ for access by non-root users:
56018+ /sys/fs/selinux
56019+ /sys/fs/fuse
56020+ /sys/devices/system/cpu
56021+
56022+config GRKERNSEC_ROFS
56023+ bool "Runtime read-only mount protection"
56024+ help
56025+ If you say Y here, a sysctl option with name "romount_protect" will
56026+ be created. By setting this option to 1 at runtime, filesystems
56027+ will be protected in the following ways:
56028+ * No new writable mounts will be allowed
56029+ * Existing read-only mounts won't be able to be remounted read/write
56030+ * Write operations will be denied on all block devices
56031+ This option acts independently of grsec_lock: once it is set to 1,
56032+ it cannot be turned off. Therefore, please be mindful of the resulting
56033+ behavior if this option is enabled in an init script on a read-only
56034+ filesystem. This feature is mainly intended for secure embedded systems.
56035+
56036+config GRKERNSEC_DEVICE_SIDECHANNEL
56037+ bool "Eliminate stat/notify-based device sidechannels"
56038+ default y if GRKERNSEC_CONFIG_AUTO
56039+ help
56040+ If you say Y here, timing analyses on block or character
56041+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
56042+ will be thwarted for unprivileged users. If a process without
56043+ CAP_MKNOD stats such a device, the last access and last modify times
56044+ will match the device's change time (ctime). No access or modify events
56045+ will be triggered through inotify/dnotify/fanotify for such devices.
56046+ This feature will prevent attacks that may at a minimum
56047+ allow an attacker to determine the administrator's password length.
56048+
56049+config GRKERNSEC_CHROOT
56050+ bool "Chroot jail restrictions"
56051+ default y if GRKERNSEC_CONFIG_AUTO
56052+ help
56053+ If you say Y here, you will be able to choose several options that will
56054+ make breaking out of a chrooted jail much more difficult. If you
56055+ encounter no software incompatibilities with the following options, it
56056+ is recommended that you enable each one.
56057+
56058+config GRKERNSEC_CHROOT_MOUNT
56059+ bool "Deny mounts"
56060+ default y if GRKERNSEC_CONFIG_AUTO
56061+ depends on GRKERNSEC_CHROOT
56062+ help
56063+ If you say Y here, processes inside a chroot will not be able to
56064+ mount or remount filesystems. If the sysctl option is enabled, a
56065+ sysctl option with name "chroot_deny_mount" is created.
56066+
56067+config GRKERNSEC_CHROOT_DOUBLE
56068+ bool "Deny double-chroots"
56069+ default y if GRKERNSEC_CONFIG_AUTO
56070+ depends on GRKERNSEC_CHROOT
56071+ help
56072+ If you say Y here, processes inside a chroot will not be able to chroot
56073+ again outside the chroot. This is a widely used method of breaking
56074+ out of a chroot jail and should not be allowed. If the sysctl
56075+ option is enabled, a sysctl option with name
56076+ "chroot_deny_chroot" is created.
56077+
56078+config GRKERNSEC_CHROOT_PIVOT
56079+ bool "Deny pivot_root in chroot"
56080+ default y if GRKERNSEC_CONFIG_AUTO
56081+ depends on GRKERNSEC_CHROOT
56082+ help
56083+ If you say Y here, processes inside a chroot will not be able to use
56084+ a function called pivot_root() that was introduced in Linux 2.3.41. It
56085+ works similarly to chroot in that it changes the root filesystem. This
56086+ function could be misused in a chrooted process to attempt to break out
56087+ of the chroot, and therefore should not be allowed. If the sysctl
56088+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
56089+ created.
56090+
56091+config GRKERNSEC_CHROOT_CHDIR
56092+ bool "Enforce chdir(\"/\") on all chroots"
56093+ default y if GRKERNSEC_CONFIG_AUTO
56094+ depends on GRKERNSEC_CHROOT
56095+ help
56096+ If you say Y here, the current working directory of all newly-chrooted
56097+ applications will be set to the root directory of the chroot.
56098+ The man page on chroot(2) states:
56099+ Note that this call does not change the current working
56100+ directory, so that `.' can be outside the tree rooted at
56101+ `/'. In particular, the super-user can escape from a
56102+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56103+
56104+ It is recommended that you say Y here, since it's not known to break
56105+ any software. If the sysctl option is enabled, a sysctl option with
56106+ name "chroot_enforce_chdir" is created.
56107+
56108+config GRKERNSEC_CHROOT_CHMOD
56109+ bool "Deny (f)chmod +s"
56110+ default y if GRKERNSEC_CONFIG_AUTO
56111+ depends on GRKERNSEC_CHROOT
56112+ help
56113+ If you say Y here, processes inside a chroot will not be able to chmod
56114+ or fchmod files to make them have suid or sgid bits. This protects
56115+ against another published method of breaking a chroot. If the sysctl
56116+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
56117+ created.
56118+
56119+config GRKERNSEC_CHROOT_FCHDIR
56120+ bool "Deny fchdir out of chroot"
56121+ default y if GRKERNSEC_CONFIG_AUTO
56122+ depends on GRKERNSEC_CHROOT
56123+ help
56124+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
56125+ to a file descriptor of the chrooting process that points to a directory
56126+ outside the filesystem will be stopped. If the sysctl option
56127+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56128+
56129+config GRKERNSEC_CHROOT_MKNOD
56130+ bool "Deny mknod"
56131+ default y if GRKERNSEC_CONFIG_AUTO
56132+ depends on GRKERNSEC_CHROOT
56133+ help
56134+ If you say Y here, processes inside a chroot will not be allowed to
56135+ mknod. The problem with using mknod inside a chroot is that it
56136+ would allow an attacker to create a device entry that is the same
56137+ as one on the physical root of your system, which could range from
56138+ as one on the physical root of your system, which could be
56139+ anything from the console device to a device for your hard drive (which
56140+ that you say Y here, unless you run into software incompatibilities.
56141+ If the sysctl option is enabled, a sysctl option with name
56142+ "chroot_deny_mknod" is created.
56143+
56144+config GRKERNSEC_CHROOT_SHMAT
56145+ bool "Deny shmat() out of chroot"
56146+ default y if GRKERNSEC_CONFIG_AUTO
56147+ depends on GRKERNSEC_CHROOT
56148+ help
56149+ If you say Y here, processes inside a chroot will not be able to attach
56150+ to shared memory segments that were created outside of the chroot jail.
56151+ It is recommended that you say Y here. If the sysctl option is enabled,
56152+ a sysctl option with name "chroot_deny_shmat" is created.
56153+
56154+config GRKERNSEC_CHROOT_UNIX
56155+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
56156+ default y if GRKERNSEC_CONFIG_AUTO
56157+ depends on GRKERNSEC_CHROOT
56158+ help
56159+ If you say Y here, processes inside a chroot will not be able to
56160+ connect to abstract (meaning not belonging to a filesystem) Unix
56161+ domain sockets that were bound outside of a chroot. It is recommended
56162+ that you say Y here. If the sysctl option is enabled, a sysctl option
56163+ with name "chroot_deny_unix" is created.
56164+
56165+config GRKERNSEC_CHROOT_FINDTASK
56166+ bool "Protect outside processes"
56167+ default y if GRKERNSEC_CONFIG_AUTO
56168+ depends on GRKERNSEC_CHROOT
56169+ help
56170+ If you say Y here, processes inside a chroot will not be able to
56171+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56172+ getsid, or view any process outside of the chroot. If the sysctl
56173+ option is enabled, a sysctl option with name "chroot_findtask" is
56174+ created.
56175+
56176+config GRKERNSEC_CHROOT_NICE
56177+ bool "Restrict priority changes"
56178+ default y if GRKERNSEC_CONFIG_AUTO
56179+ depends on GRKERNSEC_CHROOT
56180+ help
56181+ If you say Y here, processes inside a chroot will not be able to raise
56182+ the priority of processes in the chroot, or alter the priority of
56183+ processes outside the chroot. This provides more security than simply
56184+ removing CAP_SYS_NICE from the process' capability set. If the
56185+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56186+ is created.
56187+
56188+config GRKERNSEC_CHROOT_SYSCTL
56189+ bool "Deny sysctl writes"
56190+ default y if GRKERNSEC_CONFIG_AUTO
56191+ depends on GRKERNSEC_CHROOT
56192+ help
56193+ If you say Y here, an attacker in a chroot will not be able to
56194+ write to sysctl entries, either by sysctl(2) or through a /proc
56195+ interface. It is strongly recommended that you say Y here. If the
56196+ sysctl option is enabled, a sysctl option with name
56197+ "chroot_deny_sysctl" is created.
56198+
56199+config GRKERNSEC_CHROOT_CAPS
56200+ bool "Capability restrictions"
56201+ default y if GRKERNSEC_CONFIG_AUTO
56202+ depends on GRKERNSEC_CHROOT
56203+ help
56204+ If you say Y here, the capabilities on all processes within a
56205+ chroot jail will be lowered to stop module insertion, raw i/o,
56206+ system and net admin tasks, rebooting the system, modifying immutable
56207+ files, modifying IPC owned by another, and changing the system time.
56208+ This is left an option because it can break some apps. Disable this
56209+ if your chrooted apps are having problems performing those kinds of
56210+ tasks. If the sysctl option is enabled, a sysctl option with
56211+ name "chroot_caps" is created.
56212+
56213+endmenu
56214+menu "Kernel Auditing"
56215+depends on GRKERNSEC
56216+
56217+config GRKERNSEC_AUDIT_GROUP
56218+ bool "Single group for auditing"
56219+ help
56220+ If you say Y here, the exec and chdir logging features will only operate
56221+ on a group you specify. This option is recommended if you only want to
56222+ watch certain users instead of having a large amount of logs from the
56223+ entire system. If the sysctl option is enabled, a sysctl option with
56224+ name "audit_group" is created.
56225+
56226+config GRKERNSEC_AUDIT_GID
56227+ int "GID for auditing"
56228+ depends on GRKERNSEC_AUDIT_GROUP
56229+ default 1007
56230+
56231+config GRKERNSEC_EXECLOG
56232+ bool "Exec logging"
56233+ help
56234+ If you say Y here, all execve() calls will be logged (since the
56235+ other exec*() calls are frontends to execve(), all execution
56236+ will be logged). Useful for shell-servers that like to keep track
56237+ of their users. If the sysctl option is enabled, a sysctl option with
56238+ name "exec_logging" is created.
56239+ WARNING: This option when enabled will produce a LOT of logs, especially
56240+ on an active system.
56241+
56242+config GRKERNSEC_RESLOG
56243+ bool "Resource logging"
56244+ default y if GRKERNSEC_CONFIG_AUTO
56245+ help
56246+ If you say Y here, all attempts to overstep resource limits will
56247+ be logged with the resource name, the requested size, and the current
56248+ limit. It is highly recommended that you say Y here. If the sysctl
56249+ option is enabled, a sysctl option with name "resource_logging" is
56250+ created. If the RBAC system is enabled, the sysctl value is ignored.
56251+
56252+config GRKERNSEC_CHROOT_EXECLOG
56253+ bool "Log execs within chroot"
56254+ help
56255+ If you say Y here, all executions inside a chroot jail will be logged
56256+ to syslog. This can cause a large amount of logs if certain
56257+ applications (e.g. djb's daemontools) are installed on the system, and
56258+ is therefore left as an option. If the sysctl option is enabled, a
56259+ sysctl option with name "chroot_execlog" is created.
56260+
56261+config GRKERNSEC_AUDIT_PTRACE
56262+ bool "Ptrace logging"
56263+ help
56264+ If you say Y here, all attempts to attach to a process via ptrace
56265+ will be logged. If the sysctl option is enabled, a sysctl option
56266+ with name "audit_ptrace" is created.
56267+
56268+config GRKERNSEC_AUDIT_CHDIR
56269+ bool "Chdir logging"
56270+ help
56271+ If you say Y here, all chdir() calls will be logged. If the sysctl
56272+ option is enabled, a sysctl option with name "audit_chdir" is created.
56273+
56274+config GRKERNSEC_AUDIT_MOUNT
56275+ bool "(Un)Mount logging"
56276+ help
56277+ If you say Y here, all mounts and unmounts will be logged. If the
56278+ sysctl option is enabled, a sysctl option with name "audit_mount" is
56279+ created.
56280+
56281+config GRKERNSEC_SIGNAL
56282+ bool "Signal logging"
56283+ default y if GRKERNSEC_CONFIG_AUTO
56284+ help
56285+ If you say Y here, certain important signals will be logged, such as
56286+ SIGSEGV, which will as a result inform you when an error in a program
56287+ occurred, which in some cases could indicate a possible exploit attempt.
56288+ If the sysctl option is enabled, a sysctl option with name
56289+ "signal_logging" is created.
56290+
56291+config GRKERNSEC_FORKFAIL
56292+ bool "Fork failure logging"
56293+ help
56294+ If you say Y here, all failed fork() attempts will be logged.
56295+ This could suggest a fork bomb, or someone attempting to overstep
56296+ their process limit. If the sysctl option is enabled, a sysctl option
56297+ with name "forkfail_logging" is created.
56298+
56299+config GRKERNSEC_TIME
56300+ bool "Time change logging"
56301+ default y if GRKERNSEC_CONFIG_AUTO
56302+ help
56303+ If you say Y here, any changes of the system clock will be logged.
56304+ If the sysctl option is enabled, a sysctl option with name
56305+ "timechange_logging" is created.
56306+
56307+config GRKERNSEC_PROC_IPADDR
56308+ bool "/proc/<pid>/ipaddr support"
56309+ default y if GRKERNSEC_CONFIG_AUTO
56310+ help
56311+ If you say Y here, a new entry will be added to each /proc/<pid>
56312+ directory that contains the IP address of the person using the task.
56313+ The IP is carried across local TCP and AF_UNIX stream sockets.
56314+ This information can be useful for IDS/IPSes to perform remote response
56315+ to a local attack. The entry is readable by only the owner of the
56316+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
56317+ the RBAC system), and thus does not create privacy concerns.
56318+
56319+config GRKERNSEC_RWXMAP_LOG
56320+ bool 'Denied RWX mmap/mprotect logging'
56321+ default y if GRKERNSEC_CONFIG_AUTO
56322+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56323+ help
56324+ If you say Y here, calls to mmap() and mprotect() with explicit
56325+ usage of PROT_WRITE and PROT_EXEC together will be logged when
56326+ denied by the PAX_MPROTECT feature. If the sysctl option is
56327+ enabled, a sysctl option with name "rwxmap_logging" is created.
56328+
56329+config GRKERNSEC_AUDIT_TEXTREL
56330+ bool 'ELF text relocations logging (READ HELP)'
56331+ depends on PAX_MPROTECT
56332+ help
56333+ If you say Y here, text relocations will be logged with the filename
56334+ of the offending library or binary. The purpose of the feature is
56335+ to help Linux distribution developers get rid of libraries and
56336+ binaries that need text relocations which hinder the future progress
56337+ of PaX. Only Linux distribution developers should say Y here, and
56338+ never on a production machine, as this option creates an information
56339+ leak that could aid an attacker in defeating the randomization of
56340+ a single memory region. If the sysctl option is enabled, a sysctl
56341+ option with name "audit_textrel" is created.
56342+
56343+endmenu
56344+
56345+menu "Executable Protections"
56346+depends on GRKERNSEC
56347+
56348+config GRKERNSEC_DMESG
56349+ bool "Dmesg(8) restriction"
56350+ default y if GRKERNSEC_CONFIG_AUTO
56351+ help
56352+ If you say Y here, non-root users will not be able to use dmesg(8)
56353+ to view the contents of the kernel's circular log buffer.
56354+ The kernel's log buffer often contains kernel addresses and other
56355+ identifying information useful to an attacker in fingerprinting a
56356+ system for a targeted exploit.
56357+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
56358+ created.
56359+
56360+config GRKERNSEC_HARDEN_PTRACE
56361+ bool "Deter ptrace-based process snooping"
56362+ default y if GRKERNSEC_CONFIG_AUTO
56363+ help
56364+ If you say Y here, TTY sniffers and other malicious monitoring
56365+ programs implemented through ptrace will be defeated. If you
56366+ have been using the RBAC system, this option has already been
56367+ enabled for several years for all users, with the ability to make
56368+ fine-grained exceptions.
56369+
56370+ This option only affects the ability of non-root users to ptrace
56371+ processes that are not a descendent of the ptracing process.
56372+ This means that strace ./binary and gdb ./binary will still work,
56373+ but attaching to arbitrary processes will not. If the sysctl
56374+ option is enabled, a sysctl option with name "harden_ptrace" is
56375+ created.
56376+
56377+config GRKERNSEC_PTRACE_READEXEC
56378+ bool "Require read access to ptrace sensitive binaries"
56379+ default y if GRKERNSEC_CONFIG_AUTO
56380+ help
56381+ If you say Y here, unprivileged users will not be able to ptrace unreadable
56382+ binaries. This option is useful in environments that
56383+ remove the read bits (e.g. file mode 4711) from suid binaries to
56384+ prevent infoleaking of their contents. This option adds
56385+ consistency to the use of that file mode, as the binary could normally
56386+ be read out when run without privileges while ptracing.
56387+
56388+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
56389+ is created.
56390+
56391+config GRKERNSEC_SETXID
56392+ bool "Enforce consistent multithreaded privileges"
56393+ default y if GRKERNSEC_CONFIG_AUTO
56394+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
56395+ help
56396+ If you say Y here, a change from a root uid to a non-root uid
56397+ in a multithreaded application will cause the resulting uids,
56398+ gids, supplementary groups, and capabilities in that thread
56399+ to be propagated to the other threads of the process. In most
56400+ cases this is unnecessary, as glibc will emulate this behavior
56401+ on behalf of the application. Other libcs do not act in the
56402+ same way, allowing the other threads of the process to continue
56403+ running with root privileges. If the sysctl option is enabled,
56404+ a sysctl option with name "consistent_setxid" is created.
56405+
56406+config GRKERNSEC_TPE
56407+ bool "Trusted Path Execution (TPE)"
56408+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
56409+ help
56410+ If you say Y here, you will be able to choose a gid to add to the
56411+ supplementary groups of users you want to mark as "untrusted."
56412+ These users will not be able to execute any files that are not in
56413+ root-owned directories writable only by root. If the sysctl option
56414+ is enabled, a sysctl option with name "tpe" is created.
56415+
56416+config GRKERNSEC_TPE_ALL
56417+ bool "Partially restrict all non-root users"
56418+ depends on GRKERNSEC_TPE
56419+ help
56420+ If you say Y here, all non-root users will be covered under
56421+ a weaker TPE restriction. This is separate from, and in addition to,
56422+ the main TPE options that you have selected elsewhere. Thus, if a
56423+ "trusted" GID is chosen, this restriction applies to even that GID.
56424+ Under this restriction, all non-root users will only be allowed to
56425+ execute files in directories they own that are not group or
56426+ world-writable, or in directories owned by root and writable only by
56427+ root. If the sysctl option is enabled, a sysctl option with name
56428+ "tpe_restrict_all" is created.
56429+
56430+config GRKERNSEC_TPE_INVERT
56431+ bool "Invert GID option"
56432+ depends on GRKERNSEC_TPE
56433+ help
56434+ If you say Y here, the group you specify in the TPE configuration will
56435+ decide what group TPE restrictions will be *disabled* for. This
56436+ option is useful if you want TPE restrictions to be applied to most
56437+ users on the system. If the sysctl option is enabled, a sysctl option
56438+ with name "tpe_invert" is created. Unlike other sysctl options, this
56439+ entry will default to on for backward-compatibility.
56440+
56441+config GRKERNSEC_TPE_GID
56442+ int
56443+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
56444+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
56445+
56446+config GRKERNSEC_TPE_UNTRUSTED_GID
56447+ int "GID for TPE-untrusted users"
56448+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56449+ default 1005
56450+ help
56451+ Setting this GID determines what group TPE restrictions will be
56452+ *enabled* for. If the sysctl option is enabled, a sysctl option
56453+ with name "tpe_gid" is created.
56454+
56455+config GRKERNSEC_TPE_TRUSTED_GID
56456+ int "GID for TPE-trusted users"
56457+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56458+ default 1005
56459+ help
56460+ Setting this GID determines what group TPE restrictions will be
56461+ *disabled* for. If the sysctl option is enabled, a sysctl option
56462+ with name "tpe_gid" is created.
56463+
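Stripped of configuration details, the TPE rule above is a single predicate evaluated on the directory containing the binary at exec time. A condensed sketch (the patch's real implementation lives in grsecurity/grsec_tpe.c; the names and simplifications here are assumptions):

/* Sketch of the baseline TPE test: allow exec only from directories
 * owned by root and writable by root alone. The GRKERNSEC_TPE_ALL
 * variant additionally accepts directories owned by the executing
 * user that are not group- or world-writable. */
static bool tpe_dir_is_trusted(const struct inode *dir)
{
	return uid_eq(dir->i_uid, GLOBAL_ROOT_UID) &&
	       !(dir->i_mode & (S_IWGRP | S_IWOTH));
}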
56464+endmenu
56465+menu "Network Protections"
56466+depends on GRKERNSEC
56467+
56468+config GRKERNSEC_RANDNET
56469+ bool "Larger entropy pools"
56470+ default y if GRKERNSEC_CONFIG_AUTO
56471+ help
56472+ If you say Y here, the entropy pools used for many features of Linux
56473+ and grsecurity will be doubled in size. Since several grsecurity
56474+ features use additional randomness, it is recommended that you say Y
56475+ here. Saying Y here has a similar effect as modifying
56476+ /proc/sys/kernel/random/poolsize.
56477+
56478+config GRKERNSEC_BLACKHOLE
56479+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56480+ default y if GRKERNSEC_CONFIG_AUTO
56481+ depends on NET
56482+ help
56483+ If you say Y here, neither TCP resets nor ICMP
56484+ destination-unreachable packets will be sent in response to packets
56485+ sent to ports for which no associated listening process exists.
56486+ This feature supports both IPV4 and IPV6 and exempts the
56487+ loopback interface from blackholing. Enabling this feature
56488+ makes a host more resilient to DoS attacks and reduces network
56489+ visibility against scanners.
56490+
56491+ The blackhole feature as-implemented is equivalent to the FreeBSD
56492+ blackhole feature, as it prevents RST responses to all packets, not
56493+ just SYNs. Under most application behavior this causes no
56494+ problems, but applications (like haproxy) may not close certain
56495+ connections in a way that cleanly terminates them on the remote
56496+ end, leaving the remote host in LAST_ACK state. Because of this
56497+ side-effect and to prevent intentional LAST_ACK DoSes, this
56498+ feature also adds automatic mitigation against such attacks.
56499+ The mitigation drastically reduces the amount of time a socket
56500+ can spend in LAST_ACK state. If you're using haproxy and not
56501+ all servers it connects to have this option enabled, consider
56502+ disabling this feature on the haproxy host.
56503+
56504+ If the sysctl option is enabled, two sysctl options with names
56505+ "ip_blackhole" and "lastack_retries" will be created.
56506+ While "ip_blackhole" takes the standard zero/non-zero on/off
56507+ toggle, "lastack_retries" uses the same kinds of values as
56508+ "tcp_retries1" and "tcp_retries2". The default value of 4
56509+ prevents a socket from lasting more than 45 seconds in LAST_ACK
56510+ state.
56511+
56512+config GRKERNSEC_NO_SIMULT_CONNECT
56513+ bool "Disable TCP Simultaneous Connect"
56514+ default y if GRKERNSEC_CONFIG_AUTO
56515+ depends on NET
56516+ help
56517+ If you say Y here, a feature by Willy Tarreau will be enabled that
56518+ removes a weakness in Linux's strict implementation of TCP that
56519+ allows two clients to connect to each other without either entering
56520+ a listening state. The weakness allows an attacker to easily prevent
56521+ a client from connecting to a known server provided the source port
56522+ for the connection is guessed correctly.
56523+
56524+ As the weakness could be used to prevent an antivirus or IPS from
56525+ fetching updates, or prevent an SSL gateway from fetching a CRL,
56526+ it should be eliminated by enabling this option. Though Linux is
56527+ one of few operating systems supporting simultaneous connect, it
56528+ has no legitimate use in practice and is rarely supported by firewalls.
56529+
56530+config GRKERNSEC_SOCKET
56531+ bool "Socket restrictions"
56532+ depends on NET
56533+ help
56534+ If you say Y here, you will be able to choose from several options.
56535+ If you assign a GID on your system and add it to the supplementary
56536+ groups of users you want to restrict socket access to, this patch
56537+ will perform up to three things, based on the option(s) you choose.
56538+
56539+config GRKERNSEC_SOCKET_ALL
56540+ bool "Deny any sockets to group"
56541+ depends on GRKERNSEC_SOCKET
56542+ help
56543+ If you say Y here, you will be able to choose a GID whose users will
56544+ be unable to connect to other hosts from your machine or run server
56545+ applications from your machine. If the sysctl option is enabled, a
56546+ sysctl option with name "socket_all" is created.
56547+
56548+config GRKERNSEC_SOCKET_ALL_GID
56549+ int "GID to deny all sockets for"
56550+ depends on GRKERNSEC_SOCKET_ALL
56551+ default 1004
56552+ help
56553+ Here you can choose the GID to disable socket access for. Remember to
56554+ add the users you want socket access disabled for to the GID
56555+ specified here. If the sysctl option is enabled, a sysctl option
56556+ with name "socket_all_gid" is created.
56557+
56558+config GRKERNSEC_SOCKET_CLIENT
56559+ bool "Deny client sockets to group"
56560+ depends on GRKERNSEC_SOCKET
56561+ help
56562+ If you say Y here, you will be able to choose a GID whose users will
56563+ be unable to connect to other hosts from your machine, but will be
56564+ able to run servers. If this option is enabled, all users in the group
56565+ you specify will have to use passive mode when initiating ftp transfers
56566+ from the shell on your machine. If the sysctl option is enabled, a
56567+ sysctl option with name "socket_client" is created.
56568+
56569+config GRKERNSEC_SOCKET_CLIENT_GID
56570+ int "GID to deny client sockets for"
56571+ depends on GRKERNSEC_SOCKET_CLIENT
56572+ default 1003
56573+ help
56574+ Here you can choose the GID to disable client socket access for.
56575+ Remember to add the users you want client socket access disabled for to
56576+ the GID specified here. If the sysctl option is enabled, a sysctl
56577+ option with name "socket_client_gid" is created.
56578+
56579+config GRKERNSEC_SOCKET_SERVER
56580+ bool "Deny server sockets to group"
56581+ depends on GRKERNSEC_SOCKET
56582+ help
56583+ If you say Y here, you will be able to choose a GID whose users will
56584+ be unable to run server applications from your machine. If the sysctl
56585+ option is enabled, a sysctl option with name "socket_server" is created.
56586+
56587+config GRKERNSEC_SOCKET_SERVER_GID
56588+ int "GID to deny server sockets for"
56589+ depends on GRKERNSEC_SOCKET_SERVER
56590+ default 1002
56591+ help
56592+ Here you can choose the GID to disable server socket access for.
56593+ Remember to add the users you want server socket access disabled for to
56594+ the GID specified here. If the sysctl option is enabled, a sysctl
56595+ option with name "socket_server_gid" is created.
56596+
56597+endmenu
56598+menu "Sysctl Support"
56599+depends on GRKERNSEC && SYSCTL
56600+
56601+config GRKERNSEC_SYSCTL
56602+ bool "Sysctl support"
56603+ default y if GRKERNSEC_CONFIG_AUTO
56604+ help
56605+ If you say Y here, you will be able to change the options that
56606+ grsecurity runs with at bootup, without having to recompile your
56607+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
56608+ to enable (1) or disable (0) various features. All the sysctl entries
56609+ are mutable until the "grsec_lock" entry is set to a non-zero value.
56610+ All features enabled in the kernel configuration are disabled at boot
56611+ if you do not say Y to the "Turn on features by default" option.
56612+ All options should be set at startup, and the grsec_lock entry should
56613+ be set to a non-zero value after all the options are set.
56614+ *THIS IS EXTREMELY IMPORTANT*
56615+
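Each of these entries is an ordinary procfs sysctl.  A minimal sketch of
how one tunable could be registered with the stock 3.x sysctl API follows;
the identifiers are illustrative, as the real table is built in
grsecurity/grsec_sysctl.c.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/sysctl.h>

static int grsec_example_enable;	/* toggled via the proc file below */

static struct ctl_table grsec_example_table[] = {
	{
		.procname	= "example_feature",
		.data		= &grsec_example_enable,
		.maxlen		= sizeof(int),
		.mode		= 0600,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static int __init grsec_example_sysctl_init(void)
{
	if (!register_sysctl("kernel/grsecurity", grsec_example_table))
		return -ENOMEM;
	return 0;
}

With the entries in place, locking the configuration down after boot is a
single write: echo 1 > /proc/sys/kernel/grsecurity/grsec_lock.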
56616+config GRKERNSEC_SYSCTL_DISTRO
56617+ bool "Extra sysctl support for distro makers (READ HELP)"
56618+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
56619+ help
56620+ If you say Y here, additional sysctl options will be created
56621+ for features that affect processes running as root. Therefore,
56622+ it is critical when using this option that the grsec_lock entry be
56623+ enabled after boot. Only distros that ship prebuilt kernel packages
56624+ with this option enabled, and that can ensure grsec_lock is set
56625+ after boot, should use this option.
56626+ *Failure to set grsec_lock after boot makes all grsec features
56627+ this option covers useless*
56628+
56629+ Currently this option creates the following sysctl entries:
56630+ "Disable Privileged I/O": "disable_priv_io"
56631+
56632+config GRKERNSEC_SYSCTL_ON
56633+ bool "Turn on features by default"
56634+ default y if GRKERNSEC_CONFIG_AUTO
56635+ depends on GRKERNSEC_SYSCTL
56636+ help
56637+ If you say Y here, the features enabled in your kernel configuration
56638+ will be active at boot time instead of starting out disabled.
56639+ It is recommended you say Y here unless
56640+ there is some reason you would want all sysctl-tunable features to
56641+ be disabled by default. As mentioned elsewhere, it is important
56642+ to enable the grsec_lock entry once you have finished modifying
56643+ the sysctl entries.
56644+
56645+endmenu
56646+menu "Logging Options"
56647+depends on GRKERNSEC
56648+
56649+config GRKERNSEC_FLOODTIME
56650+ int "Seconds in between log messages (minimum)"
56651+ default 10
56652+ help
56653+ This option allows you to enforce a minimum number of seconds between
56654+ grsecurity log messages. The default should be suitable for most
56655+ people; however, if you choose to change it, pick a value small enough
56656+ to allow informative logs to be produced, but large enough to
56657+ prevent flooding.
56658+
56659+config GRKERNSEC_FLOODBURST
56660+ int "Number of messages in a burst (maximum)"
56661+ default 6
56662+ help
56663+ This option allows you to choose the maximum number of messages allowed
56664+ within the flood time interval you chose in a separate option. The
56665+ default should be suitable for most people; however, if you find that
56666+ many of your logs are being interpreted as flooding, you may want to
56667+ raise this value.
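Together the two options describe a fixed-window rate limit: at most
FLOODBURST messages are accepted per FLOODTIME-second window.  A
standalone sketch of the idea, using the defaults above (the in-kernel
limiter lives elsewhere in this patch):

#include <stdio.h>
#include <time.h>

#define FLOODTIME  10	/* seconds, cf. GRKERNSEC_FLOODTIME */
#define FLOODBURST  6	/* messages, cf. GRKERNSEC_FLOODBURST */

static time_t window_start;
static int window_count;

/* returns 1 if a log message may be emitted, 0 if it is suppressed */
static int log_allowed(void)
{
	time_t now = time(NULL);

	if (now - window_start >= FLOODTIME) {
		window_start = now;	/* new window: reset the burst budget */
		window_count = 0;
	}
	return window_count++ < FLOODBURST;
}

int main(void)
{
	int i, emitted = 0;

	for (i = 0; i < 20; i++)
		emitted += log_allowed();
	printf("%d of 20 messages emitted\n", emitted);	/* prints 6 of 20 */
	return 0;
}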
56668+
56669+endmenu
56670diff --git a/grsecurity/Makefile b/grsecurity/Makefile
56671new file mode 100644
56672index 0000000..1b9afa9
56673--- /dev/null
56674+++ b/grsecurity/Makefile
56675@@ -0,0 +1,38 @@
56676+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56677+# between 2001 and 2009 it was completely redesigned by Brad Spengler
56678+# into an RBAC system
56679+#
56680+# All code in this directory and various hooks inserted throughout the kernel
56681+# are copyright Brad Spengler - Open Source Security, Inc., and released
56682+# under the GPL v2 or higher
56683+
56684+KBUILD_CFLAGS += -Werror
56685+
56686+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56687+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
56688+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56689+
56690+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56691+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56692+ gracl_learn.o grsec_log.o
56693+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56694+
56695+ifdef CONFIG_NET
56696+obj-y += grsec_sock.o
56697+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56698+endif
56699+
56700+ifndef CONFIG_GRKERNSEC
56701+obj-y += grsec_disabled.o
56702+endif
56703+
56704+ifdef CONFIG_GRKERNSEC_HIDESYM
56705+extra-y := grsec_hidesym.o
56706+$(obj)/grsec_hidesym.o:
56707+ @-chmod -f 500 /boot
56708+ @-chmod -f 500 /lib/modules
56709+ @-chmod -f 500 /lib64/modules
56710+ @-chmod -f 500 /lib32/modules
56711+ @-chmod -f 700 .
56712+ @echo ' grsec: protected kernel image paths'
56713+endif
56714diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
56715new file mode 100644
56716index 0000000..0767b2e
56717--- /dev/null
56718+++ b/grsecurity/gracl.c
56719@@ -0,0 +1,4067 @@
56720+#include <linux/kernel.h>
56721+#include <linux/module.h>
56722+#include <linux/sched.h>
56723+#include <linux/mm.h>
56724+#include <linux/file.h>
56725+#include <linux/fs.h>
56726+#include <linux/namei.h>
56727+#include <linux/mount.h>
56728+#include <linux/tty.h>
56729+#include <linux/proc_fs.h>
56730+#include <linux/lglock.h>
56731+#include <linux/slab.h>
56732+#include <linux/vmalloc.h>
56733+#include <linux/types.h>
56734+#include <linux/sysctl.h>
56735+#include <linux/netdevice.h>
56736+#include <linux/ptrace.h>
56737+#include <linux/gracl.h>
56738+#include <linux/gralloc.h>
56739+#include <linux/security.h>
56740+#include <linux/grinternal.h>
56741+#include <linux/pid_namespace.h>
56742+#include <linux/stop_machine.h>
56743+#include <linux/fdtable.h>
56744+#include <linux/percpu.h>
56745+#include <linux/lglock.h>
56746+#include <linux/hugetlb.h>
56747+#include "../fs/mount.h"
56748+
56749+#include <asm/uaccess.h>
56750+#include <asm/errno.h>
56751+#include <asm/mman.h>
56752+
56753+extern struct lglock vfsmount_lock;
56754+
56755+static struct acl_role_db acl_role_set;
56756+static struct name_db name_set;
56757+static struct inodev_db inodev_set;
56758+
56759+/* for keeping track of userspace pointers used for subjects, so we
56760+ can share references in the kernel as well
56761+*/
56762+
56763+static struct path real_root;
56764+
56765+static struct acl_subj_map_db subj_map_set;
56766+
56767+static struct acl_role_label *default_role;
56768+
56769+static struct acl_role_label *role_list;
56770+
56771+static u16 acl_sp_role_value;
56772+
56773+extern char *gr_shared_page[4];
56774+static DEFINE_MUTEX(gr_dev_mutex);
56775+DEFINE_RWLOCK(gr_inode_lock);
56776+
56777+struct gr_arg *gr_usermode;
56778+
56779+static unsigned int gr_status __read_only = GR_STATUS_INIT;
56780+
56781+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
56782+extern void gr_clear_learn_entries(void);
56783+
56784+unsigned char *gr_system_salt;
56785+unsigned char *gr_system_sum;
56786+
56787+static struct sprole_pw **acl_special_roles = NULL;
56788+static __u16 num_sprole_pws = 0;
56789+
56790+static struct acl_role_label *kernel_role = NULL;
56791+
56792+static unsigned int gr_auth_attempts = 0;
56793+static unsigned long gr_auth_expires = 0UL;
56794+
56795+#ifdef CONFIG_NET
56796+extern struct vfsmount *sock_mnt;
56797+#endif
56798+
56799+extern struct vfsmount *pipe_mnt;
56800+extern struct vfsmount *shm_mnt;
56801+
56802+#ifdef CONFIG_HUGETLBFS
56803+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
56804+#endif
56805+
56806+static struct acl_object_label *fakefs_obj_rw;
56807+static struct acl_object_label *fakefs_obj_rwx;
56808+
56809+extern int gr_init_uidset(void);
56810+extern void gr_free_uidset(void);
56811+extern void gr_remove_uid(uid_t uid);
56812+extern int gr_find_uid(uid_t uid);
56813+
56814+__inline__ int
56815+gr_acl_is_enabled(void)
56816+{
56817+ return (gr_status & GR_READY);
56818+}
56819+
56820+#ifdef CONFIG_BTRFS_FS
56821+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56822+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56823+#endif
56824+
56825+static inline dev_t __get_dev(const struct dentry *dentry)
56826+{
56827+#ifdef CONFIG_BTRFS_FS
56828+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56829+ return get_btrfs_dev_from_inode(dentry->d_inode);
56830+ else
56831+#endif
56832+ return dentry->d_inode->i_sb->s_dev;
56833+}
56834+
56835+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56836+{
56837+ return __get_dev(dentry);
56838+}
56839+
56840+static char gr_task_roletype_to_char(struct task_struct *task)
56841+{
56842+ switch (task->role->roletype &
56843+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
56844+ GR_ROLE_SPECIAL)) {
56845+ case GR_ROLE_DEFAULT:
56846+ return 'D';
56847+ case GR_ROLE_USER:
56848+ return 'U';
56849+ case GR_ROLE_GROUP:
56850+ return 'G';
56851+ case GR_ROLE_SPECIAL:
56852+ return 'S';
56853+ }
56854+
56855+ return 'X';
56856+}
56857+
56858+char gr_roletype_to_char(void)
56859+{
56860+ return gr_task_roletype_to_char(current);
56861+}
56862+
56863+__inline__ int
56864+gr_acl_tpe_check(void)
56865+{
56866+ if (unlikely(!(gr_status & GR_READY)))
56867+ return 0;
56868+ if (current->role->roletype & GR_ROLE_TPE)
56869+ return 1;
56870+ else
56871+ return 0;
56872+}
56873+
56874+int
56875+gr_handle_rawio(const struct inode *inode)
56876+{
56877+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56878+ if (inode && S_ISBLK(inode->i_mode) &&
56879+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56880+ !capable(CAP_SYS_RAWIO))
56881+ return 1;
56882+#endif
56883+ return 0;
56884+}
56885+
56886+static int
56887+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
56888+{
56889+ if (likely(lena != lenb))
56890+ return 0;
56891+
56892+ return !memcmp(a, b, lena);
56893+}
56894+
56895+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
56896+{
56897+ *buflen -= namelen;
56898+ if (*buflen < 0)
56899+ return -ENAMETOOLONG;
56900+ *buffer -= namelen;
56901+ memcpy(*buffer, str, namelen);
56902+ return 0;
56903+}
56904+
56905+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
56906+{
56907+ return prepend(buffer, buflen, name->name, name->len);
56908+}
56909+
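prepend() builds the string right to left: the cursor starts at the end of
the buffer and each component is copied in front of what is already there,
so the finished path sits at the final cursor position.  A userspace
illustration of the same technique:

#include <stdio.h>
#include <string.h>

/* same contract as the kernel helper above */
static int prepend(char **buffer, int *buflen, const char *str, int namelen)
{
	*buflen -= namelen;
	if (*buflen < 0)
		return -1;	/* stands in for -ENAMETOOLONG */
	*buffer -= namelen;
	memcpy(*buffer, str, namelen);
	return 0;
}

int main(void)
{
	char buf[64];
	char *res = buf + sizeof(buf);
	int buflen = sizeof(buf);
	/* leaf first, the order a walk toward the root yields */
	const char *walk[] = { "passwd", "etc" };
	unsigned int i;

	prepend(&res, &buflen, "\0", 1);	/* terminating NUL, as in __our_d_path() */
	for (i = 0; i < 2; i++) {
		prepend(&res, &buflen, walk[i], strlen(walk[i]));
		prepend(&res, &buflen, "/", 1);
	}
	printf("%s\n", res);	/* prints /etc/passwd */
	return 0;
}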
56910+static int prepend_path(const struct path *path, struct path *root,
56911+ char **buffer, int *buflen)
56912+{
56913+ struct dentry *dentry = path->dentry;
56914+ struct vfsmount *vfsmnt = path->mnt;
56915+ struct mount *mnt = real_mount(vfsmnt);
56916+ bool slash = false;
56917+ int error = 0;
56918+
56919+ while (dentry != root->dentry || vfsmnt != root->mnt) {
56920+ struct dentry * parent;
56921+
56922+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
56923+ /* Global root? */
56924+ if (!mnt_has_parent(mnt)) {
56925+ goto out;
56926+ }
56927+ dentry = mnt->mnt_mountpoint;
56928+ mnt = mnt->mnt_parent;
56929+ vfsmnt = &mnt->mnt;
56930+ continue;
56931+ }
56932+ parent = dentry->d_parent;
56933+ prefetch(parent);
56934+ spin_lock(&dentry->d_lock);
56935+ error = prepend_name(buffer, buflen, &dentry->d_name);
56936+ spin_unlock(&dentry->d_lock);
56937+ if (!error)
56938+ error = prepend(buffer, buflen, "/", 1);
56939+ if (error)
56940+ break;
56941+
56942+ slash = true;
56943+ dentry = parent;
56944+ }
56945+
56946+out:
56947+ if (!error && !slash)
56948+ error = prepend(buffer, buflen, "/", 1);
56949+
56950+ return error;
56951+}
56952+
56953+/* this must be called with vfsmount_lock and rename_lock held */
56954+
56955+static char *__our_d_path(const struct path *path, struct path *root,
56956+ char *buf, int buflen)
56957+{
56958+ char *res = buf + buflen;
56959+ int error;
56960+
56961+ prepend(&res, &buflen, "\0", 1);
56962+ error = prepend_path(path, root, &res, &buflen);
56963+ if (error)
56964+ return ERR_PTR(error);
56965+
56966+ return res;
56967+}
56968+
56969+static char *
56970+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
56971+{
56972+ char *retval;
56973+
56974+ retval = __our_d_path(path, root, buf, buflen);
56975+ if (unlikely(IS_ERR(retval)))
56976+ retval = strcpy(buf, "<path too long>");
56977+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
56978+ retval[1] = '\0';
56979+
56980+ return retval;
56981+}
56982+
56983+static char *
56984+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56985+ char *buf, int buflen)
56986+{
56987+ struct path path;
56988+ char *res;
56989+
56990+ path.dentry = (struct dentry *)dentry;
56991+ path.mnt = (struct vfsmount *)vfsmnt;
56992+
56993+ /* we can use real_root.dentry, real_root.mnt, because this is only called
56994+ by the RBAC system */
56995+ res = gen_full_path(&path, &real_root, buf, buflen);
56996+
56997+ return res;
56998+}
56999+
57000+static char *
57001+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
57002+ char *buf, int buflen)
57003+{
57004+ char *res;
57005+ struct path path;
57006+ struct path root;
57007+ struct task_struct *reaper = init_pid_ns.child_reaper;
57008+
57009+ path.dentry = (struct dentry *)dentry;
57010+ path.mnt = (struct vfsmount *)vfsmnt;
57011+
57012+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
57013+ get_fs_root(reaper->fs, &root);
57014+
57015+ write_seqlock(&rename_lock);
57016+ br_read_lock(&vfsmount_lock);
57017+ res = gen_full_path(&path, &root, buf, buflen);
57018+ br_read_unlock(&vfsmount_lock);
57019+ write_sequnlock(&rename_lock);
57020+
57021+ path_put(&root);
57022+ return res;
57023+}
57024+
57025+static char *
57026+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
57027+{
57028+ char *ret;
57029+ write_seqlock(&rename_lock);
57030+ br_read_lock(&vfsmount_lock);
57031+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
57032+ PAGE_SIZE);
57033+ br_read_unlock(&vfsmount_lock);
57034+ write_sequnlock(&rename_lock);
57035+ return ret;
57036+}
57037+
57038+static char *
57039+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
57040+{
57041+ char *ret;
57042+ char *buf;
57043+ int buflen;
57044+
57045+ write_seqlock(&rename_lock);
57046+ br_read_lock(&vfsmount_lock);
57047+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
57048+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
57049+ buflen = (int)(ret - buf);
57050+ if (buflen >= 5)
57051+ prepend(&ret, &buflen, "/proc", 5);
57052+ else
57053+ ret = strcpy(buf, "<path too long>");
57054+ br_read_unlock(&vfsmount_lock);
57055+ write_sequnlock(&rename_lock);
57056+ return ret;
57057+}
57058+
57059+char *
57060+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
57061+{
57062+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
57063+ PAGE_SIZE);
57064+}
57065+
57066+char *
57067+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
57068+{
57069+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
57070+ PAGE_SIZE);
57071+}
57072+
57073+char *
57074+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
57075+{
57076+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
57077+ PAGE_SIZE);
57078+}
57079+
57080+char *
57081+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
57082+{
57083+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
57084+ PAGE_SIZE);
57085+}
57086+
57087+char *
57088+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
57089+{
57090+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
57091+ PAGE_SIZE);
57092+}
57093+
57094+__inline__ __u32
57095+to_gr_audit(const __u32 reqmode)
57096+{
57097+ /* masks off auditable permission flags, then shifts them to create
57098+ auditing flags, and adds the special case of append auditing if
57099+ we're requesting write */
57100+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
57101+}
57102+
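to_gr_audit() is a pure bit rearrangement, and a standalone sketch makes
the mapping concrete.  The flag values below are stand-ins chosen only to
be self-consistent with the shift of 10; the real GR_* constants are
defined in linux/gracl.h and differ.

#include <stdio.h>

#define GR_READ		0x00000001u
#define GR_WRITE	0x00000002u
#define GR_AUDIT_READ	(GR_READ << 10)		/* 0x400 */
#define GR_AUDIT_WRITE	(GR_WRITE << 10)	/* 0x800 */
#define GR_AUDIT_APPEND	0x00001000u
#define GR_AUDITS	(GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND)

static unsigned int to_gr_audit(unsigned int reqmode)
{
	return ((reqmode & ~GR_AUDITS) << 10) |
	       ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0);
}

int main(void)
{
	/* read|write maps to audit-read|audit-write plus append auditing */
	printf("%#x\n", to_gr_audit(GR_READ | GR_WRITE));	/* 0x1c00 */
	return 0;
}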
57103+struct acl_subject_label *
57104+lookup_subject_map(const struct acl_subject_label *userp)
57105+{
57106+ unsigned int index = gr_shash(userp, subj_map_set.s_size);
57107+ struct subject_map *match;
57108+
57109+ match = subj_map_set.s_hash[index];
57110+
57111+ while (match && match->user != userp)
57112+ match = match->next;
57113+
57114+ if (match != NULL)
57115+ return match->kernel;
57116+ else
57117+ return NULL;
57118+}
57119+
57120+static void
57121+insert_subj_map_entry(struct subject_map *subjmap)
57122+{
57123+ unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
57124+ struct subject_map **curr;
57125+
57126+ subjmap->prev = NULL;
57127+
57128+ curr = &subj_map_set.s_hash[index];
57129+ if (*curr != NULL)
57130+ (*curr)->prev = subjmap;
57131+
57132+ subjmap->next = *curr;
57133+ *curr = subjmap;
57134+
57135+ return;
57136+}
57137+
57138+static struct acl_role_label *
57139+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
57140+ const gid_t gid)
57141+{
57142+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
57143+ struct acl_role_label *match;
57144+ struct role_allowed_ip *ipp;
57145+ unsigned int x;
57146+ u32 curr_ip = task->signal->curr_ip;
57147+
57148+ task->signal->saved_ip = curr_ip;
57149+
57150+ match = acl_role_set.r_hash[index];
57151+
57152+ while (match) {
57153+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
57154+ for (x = 0; x < match->domain_child_num; x++) {
57155+ if (match->domain_children[x] == uid)
57156+ goto found;
57157+ }
57158+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
57159+ break;
57160+ match = match->next;
57161+ }
57162+found:
57163+ if (match == NULL) {
57164+ try_group:
57165+ index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
57166+ match = acl_role_set.r_hash[index];
57167+
57168+ while (match) {
57169+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
57170+ for (x = 0; x < match->domain_child_num; x++) {
57171+ if (match->domain_children[x] == gid)
57172+ goto found2;
57173+ }
57174+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
57175+ break;
57176+ match = match->next;
57177+ }
57178+found2:
57179+ if (match == NULL)
57180+ match = default_role;
57181+ if (match->allowed_ips == NULL)
57182+ return match;
57183+ else {
57184+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
57185+ if (likely
57186+ ((ntohl(curr_ip) & ipp->netmask) ==
57187+ (ntohl(ipp->addr) & ipp->netmask)))
57188+ return match;
57189+ }
57190+ match = default_role;
57191+ }
57192+ } else if (match->allowed_ips == NULL) {
57193+ return match;
57194+ } else {
57195+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
57196+ if (likely
57197+ ((ntohl(curr_ip) & ipp->netmask) ==
57198+ (ntohl(ipp->addr) & ipp->netmask)))
57199+ return match;
57200+ }
57201+ goto try_group;
57202+ }
57203+
57204+ return match;
57205+}
57206+
57207+struct acl_subject_label *
57208+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
57209+ const struct acl_role_label *role)
57210+{
57211+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
57212+ struct acl_subject_label *match;
57213+
57214+ match = role->subj_hash[index];
57215+
57216+ while (match && (match->inode != ino || match->device != dev ||
57217+ (match->mode & GR_DELETED))) {
57218+ match = match->next;
57219+ }
57220+
57221+ if (match && !(match->mode & GR_DELETED))
57222+ return match;
57223+ else
57224+ return NULL;
57225+}
57226+
57227+struct acl_subject_label *
57228+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
57229+ const struct acl_role_label *role)
57230+{
57231+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
57232+ struct acl_subject_label *match;
57233+
57234+ match = role->subj_hash[index];
57235+
57236+ while (match && (match->inode != ino || match->device != dev ||
57237+ !(match->mode & GR_DELETED))) {
57238+ match = match->next;
57239+ }
57240+
57241+ if (match && (match->mode & GR_DELETED))
57242+ return match;
57243+ else
57244+ return NULL;
57245+}
57246+
57247+static struct acl_object_label *
57248+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
57249+ const struct acl_subject_label *subj)
57250+{
57251+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
57252+ struct acl_object_label *match;
57253+
57254+ match = subj->obj_hash[index];
57255+
57256+ while (match && (match->inode != ino || match->device != dev ||
57257+ (match->mode & GR_DELETED))) {
57258+ match = match->next;
57259+ }
57260+
57261+ if (match && !(match->mode & GR_DELETED))
57262+ return match;
57263+ else
57264+ return NULL;
57265+}
57266+
57267+static struct acl_object_label *
57268+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
57269+ const struct acl_subject_label *subj)
57270+{
57271+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
57272+ struct acl_object_label *match;
57273+
57274+ match = subj->obj_hash[index];
57275+
57276+ while (match && (match->inode != ino || match->device != dev ||
57277+ !(match->mode & GR_DELETED))) {
57278+ match = match->next;
57279+ }
57280+
57281+ if (match && (match->mode & GR_DELETED))
57282+ return match;
57283+
57284+ match = subj->obj_hash[index];
57285+
57286+ while (match && (match->inode != ino || match->device != dev ||
57287+ (match->mode & GR_DELETED))) {
57288+ match = match->next;
57289+ }
57290+
57291+ if (match && !(match->mode & GR_DELETED))
57292+ return match;
57293+ else
57294+ return NULL;
57295+}
57296+
57297+static struct name_entry *
57298+lookup_name_entry(const char *name)
57299+{
57300+ unsigned int len = strlen(name);
57301+ unsigned int key = full_name_hash(name, len);
57302+ unsigned int index = key % name_set.n_size;
57303+ struct name_entry *match;
57304+
57305+ match = name_set.n_hash[index];
57306+
57307+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
57308+ match = match->next;
57309+
57310+ return match;
57311+}
57312+
57313+static struct name_entry *
57314+lookup_name_entry_create(const char *name)
57315+{
57316+ unsigned int len = strlen(name);
57317+ unsigned int key = full_name_hash(name, len);
57318+ unsigned int index = key % name_set.n_size;
57319+ struct name_entry *match;
57320+
57321+ match = name_set.n_hash[index];
57322+
57323+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
57324+ !match->deleted))
57325+ match = match->next;
57326+
57327+ if (match && match->deleted)
57328+ return match;
57329+
57330+ match = name_set.n_hash[index];
57331+
57332+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
57333+ match->deleted))
57334+ match = match->next;
57335+
57336+ if (match && !match->deleted)
57337+ return match;
57338+ else
57339+ return NULL;
57340+}
57341+
57342+static struct inodev_entry *
57343+lookup_inodev_entry(const ino_t ino, const dev_t dev)
57344+{
57345+ unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
57346+ struct inodev_entry *match;
57347+
57348+ match = inodev_set.i_hash[index];
57349+
57350+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
57351+ match = match->next;
57352+
57353+ return match;
57354+}
57355+
57356+static void
57357+insert_inodev_entry(struct inodev_entry *entry)
57358+{
57359+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
57360+ inodev_set.i_size);
57361+ struct inodev_entry **curr;
57362+
57363+ entry->prev = NULL;
57364+
57365+ curr = &inodev_set.i_hash[index];
57366+ if (*curr != NULL)
57367+ (*curr)->prev = entry;
57368+
57369+ entry->next = *curr;
57370+ *curr = entry;
57371+
57372+ return;
57373+}
57374+
57375+static void
57376+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
57377+{
57378+ unsigned int index =
57379+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
57380+ struct acl_role_label **curr;
57381+ struct acl_role_label *tmp, *tmp2;
57382+
57383+ curr = &acl_role_set.r_hash[index];
57384+
57385+ /* simple case, slot is empty, just set it to our role */
57386+ if (*curr == NULL) {
57387+ *curr = role;
57388+ } else {
57389+ /* example:
57390+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
57391+ 2 -> 3
57392+ */
57393+ /* first check to see if we can already be reached via this slot */
57394+ tmp = *curr;
57395+ while (tmp && tmp != role)
57396+ tmp = tmp->next;
57397+ if (tmp == role) {
57398+ /* we don't need to add ourselves to this slot's chain */
57399+ return;
57400+ }
57401+ /* we need to add ourselves to this chain, two cases */
57402+ if (role->next == NULL) {
57403+ /* simple case, append the current chain to our role */
57404+ role->next = *curr;
57405+ *curr = role;
57406+ } else {
57407+ /* 1 -> 2 -> 3 -> 4
57408+ 2 -> 3 -> 4
57409+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
57410+ */
57411+ /* trickier case: walk our role's chain until we find
57412+ the role for the start of the current slot's chain */
57413+ tmp = role;
57414+ tmp2 = *curr;
57415+ while (tmp->next && tmp->next != tmp2)
57416+ tmp = tmp->next;
57417+ if (tmp->next == tmp2) {
57418+ /* from example above, we found 3, so just
57419+ replace this slot's chain with ours */
57420+ *curr = role;
57421+ } else {
57422+ /* we didn't find a subset of our role's chain
57423+ in the current slot's chain, so append their
57424+ chain to ours, and set us as the first role in
57425+ the slot's chain
57426+
57427+ we could fold this case with the case above,
57428+ but making it explicit for clarity
57429+ */
57430+ tmp->next = tmp2;
57431+ *curr = role;
57432+ }
57433+ }
57434+ }
57435+
57436+ return;
57437+}
57438+
57439+static void
57440+insert_acl_role_label(struct acl_role_label *role)
57441+{
57442+ int i;
57443+
57444+ if (role_list == NULL) {
57445+ role_list = role;
57446+ role->prev = NULL;
57447+ } else {
57448+ role->prev = role_list;
57449+ role_list = role;
57450+ }
57451+
57452+ /* used for hash chains */
57453+ role->next = NULL;
57454+
57455+ if (role->roletype & GR_ROLE_DOMAIN) {
57456+ for (i = 0; i < role->domain_child_num; i++)
57457+ __insert_acl_role_label(role, role->domain_children[i]);
57458+ } else
57459+ __insert_acl_role_label(role, role->uidgid);
57460+}
57461+
57462+static int
57463+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
57464+{
57465+ struct name_entry **curr, *nentry;
57466+ struct inodev_entry *ientry;
57467+ unsigned int len = strlen(name);
57468+ unsigned int key = full_name_hash(name, len);
57469+ unsigned int index = key % name_set.n_size;
57470+
57471+ curr = &name_set.n_hash[index];
57472+
57473+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
57474+ curr = &((*curr)->next);
57475+
57476+ if (*curr != NULL)
57477+ return 1;
57478+
57479+ nentry = acl_alloc(sizeof (struct name_entry));
57480+ if (nentry == NULL)
57481+ return 0;
57482+ ientry = acl_alloc(sizeof (struct inodev_entry));
57483+ if (ientry == NULL)
57484+ return 0;
57485+ ientry->nentry = nentry;
57486+
57487+ nentry->key = key;
57488+ nentry->name = name;
57489+ nentry->inode = inode;
57490+ nentry->device = device;
57491+ nentry->len = len;
57492+ nentry->deleted = deleted;
57493+
57494+ nentry->prev = NULL;
57495+ curr = &name_set.n_hash[index];
57496+ if (*curr != NULL)
57497+ (*curr)->prev = nentry;
57498+ nentry->next = *curr;
57499+ *curr = nentry;
57500+
57501+ /* insert us into the table searchable by inode/dev */
57502+ insert_inodev_entry(ientry);
57503+
57504+ return 1;
57505+}
57506+
57507+static void
57508+insert_acl_obj_label(struct acl_object_label *obj,
57509+ struct acl_subject_label *subj)
57510+{
57511+ unsigned int index =
57512+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
57513+ struct acl_object_label **curr;
57514+
57515+
57516+ obj->prev = NULL;
57517+
57518+ curr = &subj->obj_hash[index];
57519+ if (*curr != NULL)
57520+ (*curr)->prev = obj;
57521+
57522+ obj->next = *curr;
57523+ *curr = obj;
57524+
57525+ return;
57526+}
57527+
57528+static void
57529+insert_acl_subj_label(struct acl_subject_label *obj,
57530+ struct acl_role_label *role)
57531+{
57532+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
57533+ struct acl_subject_label **curr;
57534+
57535+ obj->prev = NULL;
57536+
57537+ curr = &role->subj_hash[index];
57538+ if (*curr != NULL)
57539+ (*curr)->prev = obj;
57540+
57541+ obj->next = *curr;
57542+ *curr = obj;
57543+
57544+ return;
57545+}
57546+
57547+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
57548+
57549+static void *
57550+create_table(__u32 * len, int elementsize)
57551+{
57552+ unsigned int table_sizes[] = {
57553+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
57554+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
57555+ 4194301, 8388593, 16777213, 33554393, 67108859
57556+ };
57557+ void *newtable = NULL;
57558+ unsigned int pwr = 0;
57559+
57560+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
57561+ table_sizes[pwr] <= *len)
57562+ pwr++;
57563+
57564+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
57565+ return newtable;
57566+
57567+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
57568+ newtable =
57569+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
57570+ else
57571+ newtable = vmalloc(table_sizes[pwr] * elementsize);
57572+
57573+ *len = table_sizes[pwr];
57574+
57575+ return newtable;
57576+}
57577+
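A hypothetical call site showing create_table()'s contract: the requested
size is rounded up to the next prime from the list, which keeps the
chained tables near a load factor of one.

	__u32 len = 1000;	/* number of elements we expect to store */
	void *table = create_table(&len, sizeof(void *));

	/* on success, len has been rounded up to the next prime (1021 here);
	 * NULL means the request was out of range or the allocation failed,
	 * and allocations over a page go through vmalloc() automatically */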
57578+static int
57579+init_variables(const struct gr_arg *arg)
57580+{
57581+ struct task_struct *reaper = init_pid_ns.child_reaper;
57582+ unsigned int stacksize;
57583+
57584+ subj_map_set.s_size = arg->role_db.num_subjects;
57585+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
57586+ name_set.n_size = arg->role_db.num_objects;
57587+ inodev_set.i_size = arg->role_db.num_objects;
57588+
57589+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
57590+ !name_set.n_size || !inodev_set.i_size)
57591+ return 1;
57592+
57593+ if (!gr_init_uidset())
57594+ return 1;
57595+
57596+ /* set up the stack that holds allocation info */
57597+
57598+ stacksize = arg->role_db.num_pointers + 5;
57599+
57600+ if (!acl_alloc_stack_init(stacksize))
57601+ return 1;
57602+
57603+ /* grab reference for the real root dentry and vfsmount */
57604+ get_fs_root(reaper->fs, &real_root);
57605+
57606+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57607+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
57608+#endif
57609+
57610+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
57611+ if (fakefs_obj_rw == NULL)
57612+ return 1;
57613+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
57614+
57615+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
57616+ if (fakefs_obj_rwx == NULL)
57617+ return 1;
57618+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
57619+
57620+ subj_map_set.s_hash =
57621+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
57622+ acl_role_set.r_hash =
57623+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
57624+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
57625+ inodev_set.i_hash =
57626+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
57627+
57628+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
57629+ !name_set.n_hash || !inodev_set.i_hash)
57630+ return 1;
57631+
57632+ memset(subj_map_set.s_hash, 0,
57633+ sizeof(struct subject_map *) * subj_map_set.s_size);
57634+ memset(acl_role_set.r_hash, 0,
57635+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
57636+ memset(name_set.n_hash, 0,
57637+ sizeof (struct name_entry *) * name_set.n_size);
57638+ memset(inodev_set.i_hash, 0,
57639+ sizeof (struct inodev_entry *) * inodev_set.i_size);
57640+
57641+ return 0;
57642+}
57643+
57644+/* free information not needed after startup
57645+ currently contains user->kernel pointer mappings for subjects
57646+*/
57647+
57648+static void
57649+free_init_variables(void)
57650+{
57651+ __u32 i;
57652+
57653+ if (subj_map_set.s_hash) {
57654+ for (i = 0; i < subj_map_set.s_size; i++) {
57655+ if (subj_map_set.s_hash[i]) {
57656+ kfree(subj_map_set.s_hash[i]);
57657+ subj_map_set.s_hash[i] = NULL;
57658+ }
57659+ }
57660+
57661+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
57662+ PAGE_SIZE)
57663+ kfree(subj_map_set.s_hash);
57664+ else
57665+ vfree(subj_map_set.s_hash);
57666+ }
57667+
57668+ return;
57669+}
57670+
57671+static void
57672+free_variables(void)
57673+{
57674+ struct acl_subject_label *s;
57675+ struct acl_role_label *r;
57676+ struct task_struct *task, *task2;
57677+ unsigned int x;
57678+
57679+ gr_clear_learn_entries();
57680+
57681+ read_lock(&tasklist_lock);
57682+ do_each_thread(task2, task) {
57683+ task->acl_sp_role = 0;
57684+ task->acl_role_id = 0;
57685+ task->acl = NULL;
57686+ task->role = NULL;
57687+ } while_each_thread(task2, task);
57688+ read_unlock(&tasklist_lock);
57689+
57690+ /* release the reference to the real root dentry and vfsmount */
57691+ path_put(&real_root);
57692+ memset(&real_root, 0, sizeof(real_root));
57693+
57694+ /* free all object hash tables */
57695+
57696+ FOR_EACH_ROLE_START(r)
57697+ if (r->subj_hash == NULL)
57698+ goto next_role;
57699+ FOR_EACH_SUBJECT_START(r, s, x)
57700+ if (s->obj_hash == NULL)
57701+ break;
57702+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57703+ kfree(s->obj_hash);
57704+ else
57705+ vfree(s->obj_hash);
57706+ FOR_EACH_SUBJECT_END(s, x)
57707+ FOR_EACH_NESTED_SUBJECT_START(r, s)
57708+ if (s->obj_hash == NULL)
57709+ break;
57710+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57711+ kfree(s->obj_hash);
57712+ else
57713+ vfree(s->obj_hash);
57714+ FOR_EACH_NESTED_SUBJECT_END(s)
57715+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
57716+ kfree(r->subj_hash);
57717+ else
57718+ vfree(r->subj_hash);
57719+ r->subj_hash = NULL;
57720+next_role:
57721+ FOR_EACH_ROLE_END(r)
57722+
57723+ acl_free_all();
57724+
57725+ if (acl_role_set.r_hash) {
57726+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
57727+ PAGE_SIZE)
57728+ kfree(acl_role_set.r_hash);
57729+ else
57730+ vfree(acl_role_set.r_hash);
57731+ }
57732+ if (name_set.n_hash) {
57733+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
57734+ PAGE_SIZE)
57735+ kfree(name_set.n_hash);
57736+ else
57737+ vfree(name_set.n_hash);
57738+ }
57739+
57740+ if (inodev_set.i_hash) {
57741+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
57742+ PAGE_SIZE)
57743+ kfree(inodev_set.i_hash);
57744+ else
57745+ vfree(inodev_set.i_hash);
57746+ }
57747+
57748+ gr_free_uidset();
57749+
57750+ memset(&name_set, 0, sizeof (struct name_db));
57751+ memset(&inodev_set, 0, sizeof (struct inodev_db));
57752+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
57753+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
57754+
57755+ default_role = NULL;
57756+ kernel_role = NULL;
57757+ role_list = NULL;
57758+
57759+ return;
57760+}
57761+
57762+static __u32
57763+count_user_objs(struct acl_object_label *userp)
57764+{
57765+ struct acl_object_label o_tmp;
57766+ __u32 num = 0;
57767+
57768+ while (userp) {
57769+ if (copy_from_user(&o_tmp, userp,
57770+ sizeof (struct acl_object_label)))
57771+ break;
57772+
57773+ userp = o_tmp.prev;
57774+ num++;
57775+ }
57776+
57777+ return num;
57778+}
57779+
57780+static struct acl_subject_label *
57781+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
57782+
57783+static int
57784+copy_user_glob(struct acl_object_label *obj)
57785+{
57786+ struct acl_object_label *g_tmp, **guser;
57787+ unsigned int len;
57788+ char *tmp;
57789+
57790+ if (obj->globbed == NULL)
57791+ return 0;
57792+
57793+ guser = &obj->globbed;
57794+ while (*guser) {
57795+ g_tmp = (struct acl_object_label *)
57796+ acl_alloc(sizeof (struct acl_object_label));
57797+ if (g_tmp == NULL)
57798+ return -ENOMEM;
57799+
57800+ if (copy_from_user(g_tmp, *guser,
57801+ sizeof (struct acl_object_label)))
57802+ return -EFAULT;
57803+
57804+ len = strnlen_user(g_tmp->filename, PATH_MAX);
57805+
57806+ if (!len || len >= PATH_MAX)
57807+ return -EINVAL;
57808+
57809+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57810+ return -ENOMEM;
57811+
57812+ if (copy_from_user(tmp, g_tmp->filename, len))
57813+ return -EFAULT;
57814+ tmp[len-1] = '\0';
57815+ g_tmp->filename = tmp;
57816+
57817+ *guser = g_tmp;
57818+ guser = &(g_tmp->next);
57819+ }
57820+
57821+ return 0;
57822+}
57823+
57824+static int
57825+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
57826+ struct acl_role_label *role)
57827+{
57828+ struct acl_object_label *o_tmp;
57829+ unsigned int len;
57830+ int ret;
57831+ char *tmp;
57832+
57833+ while (userp) {
57834+ if ((o_tmp = (struct acl_object_label *)
57835+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
57836+ return -ENOMEM;
57837+
57838+ if (copy_from_user(o_tmp, userp,
57839+ sizeof (struct acl_object_label)))
57840+ return -EFAULT;
57841+
57842+ userp = o_tmp->prev;
57843+
57844+ len = strnlen_user(o_tmp->filename, PATH_MAX);
57845+
57846+ if (!len || len >= PATH_MAX)
57847+ return -EINVAL;
57848+
57849+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57850+ return -ENOMEM;
57851+
57852+ if (copy_from_user(tmp, o_tmp->filename, len))
57853+ return -EFAULT;
57854+ tmp[len-1] = '\0';
57855+ o_tmp->filename = tmp;
57856+
57857+ insert_acl_obj_label(o_tmp, subj);
57858+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
57859+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
57860+ return -ENOMEM;
57861+
57862+ ret = copy_user_glob(o_tmp);
57863+ if (ret)
57864+ return ret;
57865+
57866+ if (o_tmp->nested) {
57867+ int already_copied;
57868+
57869+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
57870+ if (IS_ERR(o_tmp->nested))
57871+ return PTR_ERR(o_tmp->nested);
57872+
57873+ /* insert into nested subject list if we haven't copied this one yet
57874+ to prevent duplicate entries */
57875+ if (!already_copied) {
57876+ o_tmp->nested->next = role->hash->first;
57877+ role->hash->first = o_tmp->nested;
57878+ }
57879+ }
57880+ }
57881+
57882+ return 0;
57883+}
57884+
57885+static __u32
57886+count_user_subjs(struct acl_subject_label *userp)
57887+{
57888+ struct acl_subject_label s_tmp;
57889+ __u32 num = 0;
57890+
57891+ while (userp) {
57892+ if (copy_from_user(&s_tmp, userp,
57893+ sizeof (struct acl_subject_label)))
57894+ break;
57895+
57896+ userp = s_tmp.prev;
57896+ num++;
57897+ }
57898+
57899+ return num;
57900+}
57901+
57902+static int
57903+copy_user_allowedips(struct acl_role_label *rolep)
57904+{
57905+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
57906+
57907+ ruserip = rolep->allowed_ips;
57908+
57909+ while (ruserip) {
57910+ rlast = rtmp;
57911+
57912+ if ((rtmp = (struct role_allowed_ip *)
57913+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
57914+ return -ENOMEM;
57915+
57916+ if (copy_from_user(rtmp, ruserip,
57917+ sizeof (struct role_allowed_ip)))
57918+ return -EFAULT;
57919+
57920+ ruserip = rtmp->prev;
57921+
57922+ if (!rlast) {
57923+ rtmp->prev = NULL;
57924+ rolep->allowed_ips = rtmp;
57925+ } else {
57926+ rlast->next = rtmp;
57927+ rtmp->prev = rlast;
57928+ }
57929+
57930+ if (!ruserip)
57931+ rtmp->next = NULL;
57932+ }
57933+
57934+ return 0;
57935+}
57936+
57937+static int
57938+copy_user_transitions(struct acl_role_label *rolep)
57939+{
57940+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
57941+
57942+ unsigned int len;
57943+ char *tmp;
57944+
57945+ rusertp = rolep->transitions;
57946+
57947+ while (rusertp) {
57948+ rlast = rtmp;
57949+
57950+ if ((rtmp = (struct role_transition *)
57951+ acl_alloc(sizeof (struct role_transition))) == NULL)
57952+ return -ENOMEM;
57953+
57954+ if (copy_from_user(rtmp, rusertp,
57955+ sizeof (struct role_transition)))
57956+ return -EFAULT;
57957+
57958+ rusertp = rtmp->prev;
57959+
57960+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
57961+
57962+ if (!len || len >= GR_SPROLE_LEN)
57963+ return -EINVAL;
57964+
57965+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57966+ return -ENOMEM;
57967+
57968+ if (copy_from_user(tmp, rtmp->rolename, len))
57969+ return -EFAULT;
57970+ tmp[len-1] = '\0';
57971+ rtmp->rolename = tmp;
57972+
57973+ if (!rlast) {
57974+ rtmp->prev = NULL;
57975+ rolep->transitions = rtmp;
57976+ } else {
57977+ rlast->next = rtmp;
57978+ rtmp->prev = rlast;
57979+ }
57980+
57981+ if (!rusertp)
57982+ rtmp->next = NULL;
57983+ }
57984+
57985+ return 0;
57986+}
57987+
57988+static struct acl_subject_label *
57989+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
57990+{
57991+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
57992+ unsigned int len;
57993+ char *tmp;
57994+ __u32 num_objs;
57995+ struct acl_ip_label **i_tmp, *i_utmp2;
57996+ struct gr_hash_struct ghash;
57997+ struct subject_map *subjmap;
57998+ unsigned int i_num;
57999+ int err;
58000+
58001+ if (already_copied != NULL)
58002+ *already_copied = 0;
58003+
58004+ s_tmp = lookup_subject_map(userp);
58005+
58006+ /* we've already copied this subject into the kernel, just return
58007+ the reference to it, and don't copy it over again
58008+ */
58009+ if (s_tmp) {
58010+ if (already_copied != NULL)
58011+ *already_copied = 1;
58012+ return(s_tmp);
58013+ }
58014+
58015+ if ((s_tmp = (struct acl_subject_label *)
58016+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
58017+ return ERR_PTR(-ENOMEM);
58018+
58019+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
58020+ if (subjmap == NULL)
58021+ return ERR_PTR(-ENOMEM);
58022+
58023+ subjmap->user = userp;
58024+ subjmap->kernel = s_tmp;
58025+ insert_subj_map_entry(subjmap);
58026+
58027+ if (copy_from_user(s_tmp, userp,
58028+ sizeof (struct acl_subject_label)))
58029+ return ERR_PTR(-EFAULT);
58030+
58031+ len = strnlen_user(s_tmp->filename, PATH_MAX);
58032+
58033+ if (!len || len >= PATH_MAX)
58034+ return ERR_PTR(-EINVAL);
58035+
58036+ if ((tmp = (char *) acl_alloc(len)) == NULL)
58037+ return ERR_PTR(-ENOMEM);
58038+
58039+ if (copy_from_user(tmp, s_tmp->filename, len))
58040+ return ERR_PTR(-EFAULT);
58041+ tmp[len-1] = '\0';
58042+ s_tmp->filename = tmp;
58043+
58044+ if (!strcmp(s_tmp->filename, "/"))
58045+ role->root_label = s_tmp;
58046+
58047+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
58048+ return ERR_PTR(-EFAULT);
58049+
58050+ /* copy user and group transition tables */
58051+
58052+ if (s_tmp->user_trans_num) {
58053+ uid_t *uidlist;
58054+
58055+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
58056+ if (uidlist == NULL)
58057+ return ERR_PTR(-ENOMEM);
58058+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
58059+ return ERR_PTR(-EFAULT);
58060+
58061+ s_tmp->user_transitions = uidlist;
58062+ }
58063+
58064+ if (s_tmp->group_trans_num) {
58065+ gid_t *gidlist;
58066+
58067+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
58068+ if (gidlist == NULL)
58069+ return ERR_PTR(-ENOMEM);
58070+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
58071+ return ERR_PTR(-EFAULT);
58072+
58073+ s_tmp->group_transitions = gidlist;
58074+ }
58075+
58076+ /* set up object hash table */
58077+ num_objs = count_user_objs(ghash.first);
58078+
58079+ s_tmp->obj_hash_size = num_objs;
58080+ s_tmp->obj_hash =
58081+ (struct acl_object_label **)
58082+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
58083+
58084+ if (!s_tmp->obj_hash)
58085+ return ERR_PTR(-ENOMEM);
58086+
58087+ memset(s_tmp->obj_hash, 0,
58088+ s_tmp->obj_hash_size *
58089+ sizeof (struct acl_object_label *));
58090+
58091+ /* add in objects */
58092+ err = copy_user_objs(ghash.first, s_tmp, role);
58093+
58094+ if (err)
58095+ return ERR_PTR(err);
58096+
58097+ /* set pointer for parent subject */
58098+ if (s_tmp->parent_subject) {
58099+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
58100+
58101+ if (IS_ERR(s_tmp2))
58102+ return s_tmp2;
58103+
58104+ s_tmp->parent_subject = s_tmp2;
58105+ }
58106+
58107+ /* add in ip acls */
58108+
58109+ if (!s_tmp->ip_num) {
58110+ s_tmp->ips = NULL;
58111+ goto insert;
58112+ }
58113+
58114+ i_tmp =
58115+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
58116+ sizeof (struct acl_ip_label *));
58117+
58118+ if (!i_tmp)
58119+ return ERR_PTR(-ENOMEM);
58120+
58121+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
58122+ *(i_tmp + i_num) =
58123+ (struct acl_ip_label *)
58124+ acl_alloc(sizeof (struct acl_ip_label));
58125+ if (!*(i_tmp + i_num))
58126+ return ERR_PTR(-ENOMEM);
58127+
58128+ if (copy_from_user
58129+ (&i_utmp2, s_tmp->ips + i_num,
58130+ sizeof (struct acl_ip_label *)))
58131+ return ERR_PTR(-EFAULT);
58132+
58133+ if (copy_from_user
58134+ (*(i_tmp + i_num), i_utmp2,
58135+ sizeof (struct acl_ip_label)))
58136+ return ERR_PTR(-EFAULT);
58137+
58138+ if ((*(i_tmp + i_num))->iface == NULL)
58139+ continue;
58140+
58141+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
58142+ if (!len || len >= IFNAMSIZ)
58143+ return ERR_PTR(-EINVAL);
58144+ tmp = acl_alloc(len);
58145+ if (tmp == NULL)
58146+ return ERR_PTR(-ENOMEM);
58147+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
58148+ return ERR_PTR(-EFAULT);
58149+ (*(i_tmp + i_num))->iface = tmp;
58150+ }
58151+
58152+ s_tmp->ips = i_tmp;
58153+
58154+insert:
58155+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
58156+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
58157+ return ERR_PTR(-ENOMEM);
58158+
58159+ return s_tmp;
58160+}
58161+
58162+static int
58163+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
58164+{
58165+ struct acl_subject_label s_pre;
58166+ struct acl_subject_label * ret;
58167+ int err;
58168+
58169+ while (userp) {
58170+ if (copy_from_user(&s_pre, userp,
58171+ sizeof (struct acl_subject_label)))
58172+ return -EFAULT;
58173+
58174+ ret = do_copy_user_subj(userp, role, NULL);
58175+
58176+ err = PTR_ERR(ret);
58177+ if (IS_ERR(ret))
58178+ return err;
58179+
58180+ insert_acl_subj_label(ret, role);
58181+
58182+ userp = s_pre.prev;
58183+ }
58184+
58185+ return 0;
58186+}
58187+
58188+static int
58189+copy_user_acl(struct gr_arg *arg)
58190+{
58191+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
58192+ struct acl_subject_label *subj_list;
58193+ struct sprole_pw *sptmp;
58194+ struct gr_hash_struct *ghash;
58195+ uid_t *domainlist;
58196+ unsigned int r_num;
58197+ unsigned int len;
58198+ char *tmp;
58199+ int err = 0;
58200+ __u16 i;
58201+ __u32 num_subjs;
58202+
58203+ /* we need a default and kernel role */
58204+ if (arg->role_db.num_roles < 2)
58205+ return -EINVAL;
58206+
58207+ /* copy special role authentication info from userspace */
58208+
58209+ num_sprole_pws = arg->num_sprole_pws;
58210+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
58211+
58212+ if (!acl_special_roles && num_sprole_pws)
58213+ return -ENOMEM;
58214+
58215+ for (i = 0; i < num_sprole_pws; i++) {
58216+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
58217+ if (!sptmp)
58218+ return -ENOMEM;
58219+ if (copy_from_user(sptmp, arg->sprole_pws + i,
58220+ sizeof (struct sprole_pw)))
58221+ return -EFAULT;
58222+
58223+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
58224+
58225+ if (!len || len >= GR_SPROLE_LEN)
58226+ return -EINVAL;
58227+
58228+ if ((tmp = (char *) acl_alloc(len)) == NULL)
58229+ return -ENOMEM;
58230+
58231+ if (copy_from_user(tmp, sptmp->rolename, len))
58232+ return -EFAULT;
58233+
58234+ tmp[len-1] = '\0';
58235+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58236+ printk(KERN_ALERT "Copying special role %s\n", tmp);
58237+#endif
58238+ sptmp->rolename = tmp;
58239+ acl_special_roles[i] = sptmp;
58240+ }
58241+
58242+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
58243+
58244+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
58245+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
58246+
58247+ if (!r_tmp)
58248+ return -ENOMEM;
58249+
58250+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
58251+ sizeof (struct acl_role_label *)))
58252+ return -EFAULT;
58253+
58254+ if (copy_from_user(r_tmp, r_utmp2,
58255+ sizeof (struct acl_role_label)))
58256+ return -EFAULT;
58257+
58258+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
58259+
58260+ if (!len || len >= GR_SPROLE_LEN)
58261+ return -EINVAL;
58262+
58263+ if ((tmp = (char *) acl_alloc(len)) == NULL)
58264+ return -ENOMEM;
58265+
58266+ if (copy_from_user(tmp, r_tmp->rolename, len))
58267+ return -EFAULT;
58268+
58269+ tmp[len-1] = '\0';
58270+ r_tmp->rolename = tmp;
58271+
58272+ if (!strcmp(r_tmp->rolename, "default")
58273+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
58274+ default_role = r_tmp;
58275+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
58276+ kernel_role = r_tmp;
58277+ }
58278+
58279+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
58280+ return -ENOMEM;
58281+
58282+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
58283+ return -EFAULT;
58284+
58285+ r_tmp->hash = ghash;
58286+
58287+ num_subjs = count_user_subjs(r_tmp->hash->first);
58288+
58289+ r_tmp->subj_hash_size = num_subjs;
58290+ r_tmp->subj_hash =
58291+ (struct acl_subject_label **)
58292+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
58293+
58294+ if (!r_tmp->subj_hash)
58295+ return -ENOMEM;
58296+
58297+ err = copy_user_allowedips(r_tmp);
58298+ if (err)
58299+ return err;
58300+
58301+ /* copy domain info */
58302+ if (r_tmp->domain_children != NULL) {
58303+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
58304+ if (domainlist == NULL)
58305+ return -ENOMEM;
58306+
58307+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
58308+ return -EFAULT;
58309+
58310+ r_tmp->domain_children = domainlist;
58311+ }
58312+
58313+ err = copy_user_transitions(r_tmp);
58314+ if (err)
58315+ return err;
58316+
58317+ memset(r_tmp->subj_hash, 0,
58318+ r_tmp->subj_hash_size *
58319+ sizeof (struct acl_subject_label *));
58320+
58321+ /* acquire the list of subjects, then NULL out
58322+ the list prior to parsing the subjects for this role,
58323+ as during this parsing the list is replaced with a list
58324+ of *nested* subjects for the role
58325+ */
58326+ subj_list = r_tmp->hash->first;
58327+
58328+ /* set nested subject list to null */
58329+ r_tmp->hash->first = NULL;
58330+
58331+ err = copy_user_subjs(subj_list, r_tmp);
58332+
58333+ if (err)
58334+ return err;
58335+
58336+ insert_acl_role_label(r_tmp);
58337+ }
58338+
58339+ if (default_role == NULL || kernel_role == NULL)
58340+ return -EINVAL;
58341+
58342+ return err;
58343+}
58344+
58345+static int
58346+gracl_init(struct gr_arg *args)
58347+{
58348+ int error = 0;
58349+
58350+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
58351+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
58352+
58353+ if (init_variables(args)) {
58354+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
58355+ error = -ENOMEM;
58356+ free_variables();
58357+ goto out;
58358+ }
58359+
58360+ error = copy_user_acl(args);
58361+ free_init_variables();
58362+ if (error) {
58363+ free_variables();
58364+ goto out;
58365+ }
58366+
58367+ if ((error = gr_set_acls(0))) {
58368+ free_variables();
58369+ goto out;
58370+ }
58371+
58372+ pax_open_kernel();
58373+ gr_status |= GR_READY;
58374+ pax_close_kernel();
58375+
58376+ out:
58377+ return error;
58378+}
58379+
58380+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
58381+
58382+static int
58383+glob_match(const char *p, const char *n)
58384+{
58385+ char c;
58386+
58387+ while ((c = *p++) != '\0') {
58388+ switch (c) {
58389+ case '?':
58390+ if (*n == '\0')
58391+ return 1;
58392+ else if (*n == '/')
58393+ return 1;
58394+ break;
58395+ case '\\':
58396+ if (*n != c)
58397+ return 1;
58398+ break;
58399+ case '*':
58400+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
58401+ if (*n == '/')
58402+ return 1;
58403+ else if (c == '?') {
58404+ if (*n == '\0')
58405+ return 1;
58406+ else
58407+ ++n;
58408+ }
58409+ }
58410+ if (c == '\0') {
58411+ return 0;
58412+ } else {
58413+ const char *endp;
58414+
58415+ if ((endp = strchr(n, '/')) == NULL)
58416+ endp = n + strlen(n);
58417+
58418+ if (c == '[') {
58419+ for (--p; n < endp; ++n)
58420+ if (!glob_match(p, n))
58421+ return 0;
58422+ } else if (c == '/') {
58423+ while (*n != '\0' && *n != '/')
58424+ ++n;
58425+ if (*n == '/' && !glob_match(p, n + 1))
58426+ return 0;
58427+ } else {
58428+ for (--p; n < endp; ++n)
58429+ if (*n == c && !glob_match(p, n))
58430+ return 0;
58431+ }
58432+
58433+ return 1;
58434+ }
58435+ case '[':
58436+ {
58437+ int not;
58438+ char cold;
58439+
58440+ if (*n == '\0' || *n == '/')
58441+ return 1;
58442+
58443+ not = (*p == '!' || *p == '^');
58444+ if (not)
58445+ ++p;
58446+
58447+ c = *p++;
58448+ for (;;) {
58449+ unsigned char fn = (unsigned char)*n;
58450+
58451+ if (c == '\0')
58452+ return 1;
58453+ else {
58454+ if (c == fn)
58455+ goto matched;
58456+ cold = c;
58457+ c = *p++;
58458+
58459+ if (c == '-' && *p != ']') {
58460+ unsigned char cend = *p++;
58461+
58462+ if (cend == '\0')
58463+ return 1;
58464+
58465+ if (cold <= fn && fn <= cend)
58466+ goto matched;
58467+
58468+ c = *p++;
58469+ }
58470+ }
58471+
58472+ if (c == ']')
58473+ break;
58474+ }
58475+ if (!not)
58476+ return 1;
58477+ break;
58478+ matched:
58479+ while (c != ']') {
58480+ if (c == '\0')
58481+ return 1;
58482+
58483+ c = *p++;
58484+ }
58485+ if (not)
58486+ return 1;
58487+ }
58488+ break;
58489+ default:
58490+ if (c != *n)
58491+ return 1;
58492+ }
58493+
58494+ ++n;
58495+ }
58496+
58497+ if (*n == '\0')
58498+ return 0;
58499+
58500+ if (*n == '/')
58501+ return 0;
58502+
58503+ return 1;
58504+}
58505+
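A few standalone checks of the matcher's slash-aware semantics (0 = match,
1 = no match), assuming glob_match() above is made non-static for testing:

#include <stdio.h>

int glob_match(const char *p, const char *n);	/* drop 'static' to test */

int main(void)
{
	printf("%d\n", glob_match("*.conf", "exception_policy.conf"));	/* 0 */
	printf("%d\n", glob_match("a?c", "a/c"));	/* 1: '?' never matches '/' */
	printf("%d\n", glob_match("/home/*/.bashrc",
				  "/home/user/.bashrc"));	/* 0: '*' stops at '/' */
	return 0;
}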
58506+static struct acl_object_label *
58507+chk_glob_label(struct acl_object_label *globbed,
58508+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
58509+{
58510+ struct acl_object_label *tmp;
58511+
58512+ if (*path == NULL)
58513+ *path = gr_to_filename_nolock(dentry, mnt);
58514+
58515+ tmp = globbed;
58516+
58517+ while (tmp) {
58518+ if (!glob_match(tmp->filename, *path))
58519+ return tmp;
58520+ tmp = tmp->next;
58521+ }
58522+
58523+ return NULL;
58524+}
58525+
58526+static struct acl_object_label *
58527+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58528+ const ino_t curr_ino, const dev_t curr_dev,
58529+ const struct acl_subject_label *subj, char **path, const int checkglob)
58530+{
58531+ struct acl_subject_label *tmpsubj;
58532+ struct acl_object_label *retval;
58533+ struct acl_object_label *retval2;
58534+
58535+ tmpsubj = (struct acl_subject_label *) subj;
58536+ read_lock(&gr_inode_lock);
58537+ do {
58538+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
58539+ if (retval) {
58540+ if (checkglob && retval->globbed) {
58541+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
58542+ if (retval2)
58543+ retval = retval2;
58544+ }
58545+ break;
58546+ }
58547+ } while ((tmpsubj = tmpsubj->parent_subject));
58548+ read_unlock(&gr_inode_lock);
58549+
58550+ return retval;
58551+}
58552+
58553+static __inline__ struct acl_object_label *
58554+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
58555+ struct dentry *curr_dentry,
58556+ const struct acl_subject_label *subj, char **path, const int checkglob)
58557+{
58558+ int newglob = checkglob;
58559+ ino_t inode;
58560+ dev_t device;
58561+
58562+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
58563+ as we don't want a / * rule to match instead of the / object
58564+ don't do this for create lookups that call this function though, since they're looking up
58565+ on the parent and thus need globbing checks on all paths
58566+ */
58567+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
58568+ newglob = GR_NO_GLOB;
58569+
58570+ spin_lock(&curr_dentry->d_lock);
58571+ inode = curr_dentry->d_inode->i_ino;
58572+ device = __get_dev(curr_dentry);
58573+ spin_unlock(&curr_dentry->d_lock);
58574+
58575+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
58576+}
58577+
58578+#ifdef CONFIG_HUGETLBFS
58579+static inline bool
58580+is_hugetlbfs_mnt(const struct vfsmount *mnt)
58581+{
58582+ int i;
58583+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
58584+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
58585+ return true;
58586+ }
58587+
58588+ return false;
58589+}
58590+#endif
58591+
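+/*
+ * resolve the most specific object label for a dentry/mnt pair: walk from
+ * the dentry up through parent directories and mountpoints toward the
+ * namespace root (under rename_lock and vfsmount_lock so the path cannot
+ * change underneath us), returning the first label found; anonymous
+ * in-kernel mounts (shm, pipes, sockets, hugetlbfs) and private inodes
+ * get a fake object instead
+ */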
58592+static struct acl_object_label *
58593+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58594+ const struct acl_subject_label *subj, char *path, const int checkglob)
58595+{
58596+ struct dentry *dentry = (struct dentry *) l_dentry;
58597+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
58598+ struct mount *real_mnt = real_mount(mnt);
58599+ struct acl_object_label *retval;
58600+ struct dentry *parent;
58601+
58602+ write_seqlock(&rename_lock);
58603+ br_read_lock(&vfsmount_lock);
58604+
58605+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
58606+#ifdef CONFIG_NET
58607+ mnt == sock_mnt ||
58608+#endif
58609+#ifdef CONFIG_HUGETLBFS
58610+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
58611+#endif
58612+ /* ignore Eric Biederman */
58613+ IS_PRIVATE(l_dentry->d_inode))) {
58614+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
58615+ goto out;
58616+ }
58617+
58618+ for (;;) {
58619+ if (dentry == real_root.dentry && mnt == real_root.mnt)
58620+ break;
58621+
58622+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58623+ if (!mnt_has_parent(real_mnt))
58624+ break;
58625+
58626+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58627+ if (retval != NULL)
58628+ goto out;
58629+
58630+ dentry = real_mnt->mnt_mountpoint;
58631+ real_mnt = real_mnt->mnt_parent;
58632+ mnt = &real_mnt->mnt;
58633+ continue;
58634+ }
58635+
58636+ parent = dentry->d_parent;
58637+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58638+ if (retval != NULL)
58639+ goto out;
58640+
58641+ dentry = parent;
58642+ }
58643+
58644+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
58645+
58646+ /* real_root is pinned so we don't have to hold a reference */
58647+ if (retval == NULL)
58648+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
58649+out:
58650+ br_read_unlock(&vfsmount_lock);
58651+ write_sequnlock(&rename_lock);
58652+
58653+ BUG_ON(retval == NULL);
58654+
58655+ return retval;
58656+}
58657+
58658+static __inline__ struct acl_object_label *
58659+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58660+ const struct acl_subject_label *subj)
58661+{
58662+ char *path = NULL;
58663+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
58664+}
58665+
58666+static __inline__ struct acl_object_label *
58667+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58668+ const struct acl_subject_label *subj)
58669+{
58670+ char *path = NULL;
58671+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
58672+}
58673+
58674+static __inline__ struct acl_object_label *
58675+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58676+ const struct acl_subject_label *subj, char *path)
58677+{
58678+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
58679+}
58680+
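+/*
+ * like __chk_obj_label() above, but resolve a subject label for the given
+ * role by walking the path from the dentry up to the namespace root;
+ * falls back to the label of real_root if nothing more specific exists
+ */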
58681+static struct acl_subject_label *
58682+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
58683+ const struct acl_role_label *role)
58684+{
58685+ struct dentry *dentry = (struct dentry *) l_dentry;
58686+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
58687+ struct mount *real_mnt = real_mount(mnt);
58688+ struct acl_subject_label *retval;
58689+ struct dentry *parent;
58690+
58691+ write_seqlock(&rename_lock);
58692+ br_read_lock(&vfsmount_lock);
58693+
58694+ for (;;) {
58695+ if (dentry == real_root.dentry && mnt == real_root.mnt)
58696+ break;
58697+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58698+ if (!mnt_has_parent(real_mnt))
58699+ break;
58700+
58701+ spin_lock(&dentry->d_lock);
58702+ read_lock(&gr_inode_lock);
58703+ retval =
58704+ lookup_acl_subj_label(dentry->d_inode->i_ino,
58705+ __get_dev(dentry), role);
58706+ read_unlock(&gr_inode_lock);
58707+ spin_unlock(&dentry->d_lock);
58708+ if (retval != NULL)
58709+ goto out;
58710+
58711+ dentry = real_mnt->mnt_mountpoint;
58712+ real_mnt = real_mnt->mnt_parent;
58713+ mnt = &real_mnt->mnt;
58714+ continue;
58715+ }
58716+
58717+ spin_lock(&dentry->d_lock);
58718+ read_lock(&gr_inode_lock);
58719+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58720+ __get_dev(dentry), role);
58721+ read_unlock(&gr_inode_lock);
58722+ parent = dentry->d_parent;
58723+ spin_unlock(&dentry->d_lock);
58724+
58725+ if (retval != NULL)
58726+ goto out;
58727+
58728+ dentry = parent;
58729+ }
58730+
58731+ spin_lock(&dentry->d_lock);
58732+ read_lock(&gr_inode_lock);
58733+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58734+ __get_dev(dentry), role);
58735+ read_unlock(&gr_inode_lock);
58736+ spin_unlock(&dentry->d_lock);
58737+
58738+ if (unlikely(retval == NULL)) {
58739+ /* real_root is pinned, we don't need to hold a reference */
58740+ read_lock(&gr_inode_lock);
58741+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
58742+ __get_dev(real_root.dentry), role);
58743+ read_unlock(&gr_inode_lock);
58744+ }
58745+out:
58746+ br_read_unlock(&vfsmount_lock);
58747+ write_sequnlock(&rename_lock);
58748+
58749+ BUG_ON(retval == NULL);
58750+
58751+ return retval;
58752+}
58753+
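+/*
+ * the gr_log_learn*() helpers emit learning-mode audit records (current
+ * role, credentials, executable, and the attempted access) that are later
+ * post-processed into policy by the userland tools
+ */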
58754+static void
58755+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
58756+{
58757+ struct task_struct *task = current;
58758+ const struct cred *cred = current_cred();
58759+
58760+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
58761+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58762+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58763+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
58764+
58765+ return;
58766+}
58767+
58768+static void
58769+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
58770+{
58771+ struct task_struct *task = current;
58772+ const struct cred *cred = current_cred();
58773+
58774+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
58775+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58776+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58777+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
58778+
58779+ return;
58780+}
58781+
58782+static void
58783+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
58784+{
58785+ struct task_struct *task = current;
58786+ const struct cred *cred = current_cred();
58787+
58788+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
58789+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58790+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58791+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
58792+
58793+ return;
58794+}
58795+
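+/*
+ * central file access check: mask the requested mode bits against the
+ * object label that applies to the current subject.  Also handles the
+ * persistent-role transfer to init for GR_INIT_TRANSFER objects and, in
+ * learn mode, grants and logs accesses that would otherwise be denied
+ */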
58796+__u32
58797+gr_search_file(const struct dentry * dentry, const __u32 mode,
58798+ const struct vfsmount * mnt)
58799+{
58800+ __u32 retval = mode;
58801+ struct acl_subject_label *curracl;
58802+ struct acl_object_label *currobj;
58803+
58804+ if (unlikely(!(gr_status & GR_READY)))
58805+ return (mode & ~GR_AUDITS);
58806+
58807+ curracl = current->acl;
58808+
58809+ currobj = chk_obj_label(dentry, mnt, curracl);
58810+ retval = currobj->mode & mode;
58811+
58812+ /* if we're opening a specified transfer file for writing
58813+ (e.g. /dev/initctl), then transfer our role to init
58814+ */
58815+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
58816+ current->role->roletype & GR_ROLE_PERSIST)) {
58817+ struct task_struct *task = init_pid_ns.child_reaper;
58818+
58819+ if (task->role != current->role) {
58820+ task->acl_sp_role = 0;
58821+ task->acl_role_id = current->acl_role_id;
58822+ task->role = current->role;
58823+ rcu_read_lock();
58824+ read_lock(&grsec_exec_file_lock);
58825+ gr_apply_subject_to_task(task);
58826+ read_unlock(&grsec_exec_file_lock);
58827+ rcu_read_unlock();
58828+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
58829+ }
58830+ }
58831+
58832+ if (unlikely
58833+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
58834+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
58835+ __u32 new_mode = mode;
58836+
58837+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58838+
58839+ retval = new_mode;
58840+
58841+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
58842+ new_mode |= GR_INHERIT;
58843+
58844+ if (!(mode & GR_NOLEARN))
58845+ gr_log_learn(dentry, mnt, new_mode);
58846+ }
58847+
58848+ return retval;
58849+}
58850+
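+/*
+ * object lookup for a file that is about to be created: since no inode
+ * exists yet, first try the policy's name table for the new path, then
+ * fall back to the parent directory's object label (with glob checks)
+ */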
58851+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
58852+ const struct dentry *parent,
58853+ const struct vfsmount *mnt)
58854+{
58855+ struct name_entry *match;
58856+ struct acl_object_label *matchpo;
58857+ struct acl_subject_label *curracl;
58858+ char *path;
58859+
58860+ if (unlikely(!(gr_status & GR_READY)))
58861+ return NULL;
58862+
58863+ preempt_disable();
58864+ path = gr_to_filename_rbac(new_dentry, mnt);
58865+ match = lookup_name_entry_create(path);
58866+
58867+ curracl = current->acl;
58868+
58869+ if (match) {
58870+ read_lock(&gr_inode_lock);
58871+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
58872+ read_unlock(&gr_inode_lock);
58873+
58874+ if (matchpo) {
58875+ preempt_enable();
58876+ return matchpo;
58877+ }
58878+ }
58879+
58880+ // lookup parent
58881+
58882+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
58883+
58884+ preempt_enable();
58885+ return matchpo;
58886+}
58887+
58888+__u32
58889+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
58890+ const struct vfsmount * mnt, const __u32 mode)
58891+{
58892+ struct acl_object_label *matchpo;
58893+ __u32 retval;
58894+
58895+ if (unlikely(!(gr_status & GR_READY)))
58896+ return (mode & ~GR_AUDITS);
58897+
58898+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
58899+
58900+ retval = matchpo->mode & mode;
58901+
58902+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
58903+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58904+ __u32 new_mode = mode;
58905+
58906+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58907+
58908+ gr_log_learn(new_dentry, mnt, new_mode);
58909+ return new_mode;
58910+ }
58911+
58912+ return retval;
58913+}
58914+
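+/*
+ * hardlink policy: the old name must grant at least the permissions of
+ * the new name (so a link cannot widen access), restrictions and auditing
+ * on the old name must carry over, and linking to privileged (suid/sgid/
+ * fcapped) binaries requires GR_SETID on the new name
+ */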
58915+__u32
58916+gr_check_link(const struct dentry * new_dentry,
58917+ const struct dentry * parent_dentry,
58918+ const struct vfsmount * parent_mnt,
58919+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
58920+{
58921+ struct acl_object_label *obj;
58922+ __u32 oldmode, newmode;
58923+ __u32 needmode;
58924+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
58925+ GR_DELETE | GR_INHERIT;
58926+
58927+ if (unlikely(!(gr_status & GR_READY)))
58928+ return (GR_CREATE | GR_LINK);
58929+
58930+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
58931+ oldmode = obj->mode;
58932+
58933+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
58934+ newmode = obj->mode;
58935+
58936+ needmode = newmode & checkmodes;
58937+
58938+ // old name for hardlink must have at least the permissions of the new name
58939+ if ((oldmode & needmode) != needmode)
58940+ goto bad;
58941+
58942+ // if old name had restrictions/auditing, make sure the new name does as well
58943+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
58944+
58945+ // don't allow hardlinking of suid/sgid/fcapped files without permission
58946+ if (is_privileged_binary(old_dentry))
58947+ needmode |= GR_SETID;
58948+
58949+ if ((newmode & needmode) != needmode)
58950+ goto bad;
58951+
58952+ // enforce minimum permissions
58953+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
58954+ return newmode;
58955+bad:
58956+ needmode = oldmode;
58957+ if (is_privileged_binary(old_dentry))
58958+ needmode |= GR_SETID;
58959+
58960+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
58961+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
58962+ return (GR_CREATE | GR_LINK);
58963+ } else if (newmode & GR_SUPPRESS)
58964+ return GR_SUPPRESS;
58965+ else
58966+ return 0;
58967+}
58968+
58969+int
58970+gr_check_hidden_task(const struct task_struct *task)
58971+{
58972+ if (unlikely(!(gr_status & GR_READY)))
58973+ return 0;
58974+
58975+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
58976+ return 1;
58977+
58978+ return 0;
58979+}
58980+
58981+int
58982+gr_check_protected_task(const struct task_struct *task)
58983+{
58984+ if (unlikely(!(gr_status & GR_READY) || !task))
58985+ return 0;
58986+
58987+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58988+ task->acl != current->acl)
58989+ return 1;
58990+
58991+ return 0;
58992+}
58993+
58994+int
58995+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
58996+{
58997+ struct task_struct *p;
58998+ int ret = 0;
58999+
59000+ if (unlikely(!(gr_status & GR_READY) || !pid))
59001+ return ret;
59002+
59003+ read_lock(&tasklist_lock);
59004+ do_each_pid_task(pid, type, p) {
59005+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
59006+ p->acl != current->acl) {
59007+ ret = 1;
59008+ goto out;
59009+ }
59010+ } while_each_pid_task(pid, type, p);
59011+out:
59012+ read_unlock(&tasklist_lock);
59013+
59014+ return ret;
59015+}
59016+
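+/*
+ * copy the RBAC state (role, subject, saved ip, exec_file reference) from
+ * current to a newly forked task; if current's ip came from accept(), the
+ * address is handed to the child and then cleared in the parent
+ */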
59017+void
59018+gr_copy_label(struct task_struct *tsk)
59019+{
59020+ tsk->signal->used_accept = 0;
59021+ tsk->acl_sp_role = 0;
59022+ tsk->acl_role_id = current->acl_role_id;
59023+ tsk->acl = current->acl;
59024+ tsk->role = current->role;
59025+ tsk->signal->curr_ip = current->signal->curr_ip;
59026+ tsk->signal->saved_ip = current->signal->saved_ip;
59027+ if (current->exec_file)
59028+ get_file(current->exec_file);
59029+ tsk->exec_file = current->exec_file;
59030+ tsk->is_writable = current->is_writable;
59031+ if (unlikely(current->signal->used_accept)) {
59032+ current->signal->curr_ip = 0;
59033+ current->signal->saved_ip = 0;
59034+ }
59035+
59036+ return;
59037+}
59038+
59039+static void
59040+gr_set_proc_res(struct task_struct *task)
59041+{
59042+ struct acl_subject_label *proc;
59043+ unsigned short i;
59044+
59045+ proc = task->acl;
59046+
59047+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
59048+ return;
59049+
59050+ for (i = 0; i < RLIM_NLIMITS; i++) {
59051+ if (!(proc->resmask & (1 << i)))
59052+ continue;
59053+
59054+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
59055+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
59056+ }
59057+
59058+ return;
59059+}
59060+
59061+extern int __gr_process_user_ban(struct user_struct *user);
59062+
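+/*
+ * enforce per-subject id transition lists: with GR_ID_ALLOW each new
+ * real/effective/fs id must appear in the list, with GR_ID_DENY none of
+ * them may; returning 1 denies the change.  gr_check_user_change() also
+ * rejects users banned via the kernel lockout/brute-force protections
+ */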
59063+int
59064+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
59065+{
59066+ unsigned int i;
59067+ __u16 num;
59068+ uid_t *uidlist;
59069+ uid_t curuid;
59070+ int realok = 0;
59071+ int effectiveok = 0;
59072+ int fsok = 0;
59073+ uid_t globalreal, globaleffective, globalfs;
59074+
59075+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
59076+ struct user_struct *user;
59077+
59078+ if (!uid_valid(real))
59079+ goto skipit;
59080+
59081+ /* find user based on global namespace */
59082+
59083+ globalreal = GR_GLOBAL_UID(real);
59084+
59085+ user = find_user(make_kuid(&init_user_ns, globalreal));
59086+ if (user == NULL)
59087+ goto skipit;
59088+
59089+ if (__gr_process_user_ban(user)) {
59090+ /* for find_user */
59091+ free_uid(user);
59092+ return 1;
59093+ }
59094+
59095+ /* for find_user */
59096+ free_uid(user);
59097+
59098+skipit:
59099+#endif
59100+
59101+ if (unlikely(!(gr_status & GR_READY)))
59102+ return 0;
59103+
59104+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59105+ gr_log_learn_uid_change(real, effective, fs);
59106+
59107+ num = current->acl->user_trans_num;
59108+ uidlist = current->acl->user_transitions;
59109+
59110+ if (uidlist == NULL)
59111+ return 0;
59112+
59113+ if (!uid_valid(real)) {
59114+ realok = 1;
59115+ globalreal = (uid_t)-1;
59116+ } else {
59117+ globalreal = GR_GLOBAL_UID(real);
59118+ }
59119+ if (!uid_valid(effective)) {
59120+ effectiveok = 1;
59121+ globaleffective = (uid_t)-1;
59122+ } else {
59123+ globaleffective = GR_GLOBAL_UID(effective);
59124+ }
59125+ if (!uid_valid(fs)) {
59126+ fsok = 1;
59127+ globalfs = (uid_t)-1;
59128+ } else {
59129+ globalfs = GR_GLOBAL_UID(fs);
59130+ }
59131+
59132+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
59133+ for (i = 0; i < num; i++) {
59134+ curuid = uidlist[i];
59135+ if (globalreal == curuid)
59136+ realok = 1;
59137+ if (globaleffective == curuid)
59138+ effectiveok = 1;
59139+ if (globalfs == curuid)
59140+ fsok = 1;
59141+ }
59142+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
59143+ for (i = 0; i < num; i++) {
59144+ curuid = uidlist[i];
59145+ if (globalreal == curuid)
59146+ break;
59147+ if (globaleffective == curuid)
59148+ break;
59149+ if (globalfs == curuid)
59150+ break;
59151+ }
59152+ /* not in deny list */
59153+ if (i == num) {
59154+ realok = 1;
59155+ effectiveok = 1;
59156+ fsok = 1;
59157+ }
59158+ }
59159+
59160+ if (realok && effectiveok && fsok)
59161+ return 0;
59162+ else {
59163+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
59164+ return 1;
59165+ }
59166+}
59167+
59168+int
59169+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
59170+{
59171+ unsigned int i;
59172+ __u16 num;
59173+ gid_t *gidlist;
59174+ gid_t curgid;
59175+ int realok = 0;
59176+ int effectiveok = 0;
59177+ int fsok = 0;
59178+ gid_t globalreal, globaleffective, globalfs;
59179+
59180+ if (unlikely(!(gr_status & GR_READY)))
59181+ return 0;
59182+
59183+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59184+ gr_log_learn_gid_change(real, effective, fs);
59185+
59186+ num = current->acl->group_trans_num;
59187+ gidlist = current->acl->group_transitions;
59188+
59189+ if (gidlist == NULL)
59190+ return 0;
59191+
59192+ if (!gid_valid(real)) {
59193+ realok = 1;
59194+ globalreal = (gid_t)-1;
59195+ } else {
59196+ globalreal = GR_GLOBAL_GID(real);
59197+ }
59198+ if (!gid_valid(effective)) {
59199+ effectiveok = 1;
59200+ globaleffective = (gid_t)-1;
59201+ } else {
59202+ globaleffective = GR_GLOBAL_GID(effective);
59203+ }
59204+ if (!gid_valid(fs)) {
59205+ fsok = 1;
59206+ globalfs = (gid_t)-1;
59207+ } else {
59208+ globalfs = GR_GLOBAL_GID(fs);
59209+ }
59210+
59211+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
59212+ for (i = 0; i < num; i++) {
59213+ curgid = gidlist[i];
59214+ if (globalreal == curgid)
59215+ realok = 1;
59216+ if (globaleffective == curgid)
59217+ effectiveok = 1;
59218+ if (globalfs == curgid)
59219+ fsok = 1;
59220+ }
59221+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
59222+ for (i = 0; i < num; i++) {
59223+ curgid = gidlist[i];
59224+ if (globalreal == curgid)
59225+ break;
59226+ if (globaleffective == curgid)
59227+ break;
59228+ if (globalfs == curgid)
59229+ break;
59230+ }
59231+ /* not in deny list */
59232+ if (i == num) {
59233+ realok = 1;
59234+ effectiveok = 1;
59235+ fsok = 1;
59236+ }
59237+ }
59238+
59239+ if (realok && effectiveok && fsok)
59240+ return 0;
59241+ else {
59242+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
59243+ return 1;
59244+ }
59245+}
59246+
59247+extern int gr_acl_is_capable(const int cap);
59248+
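+/*
+ * re-evaluate a task's role and subject after a uid/gid change: kernel
+ * threads get the kernel role, unprivileged tasks may not switch roles,
+ * and an inherited subject is preserved when the role stays the same
+ */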
59249+void
59250+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
59251+{
59252+ struct acl_role_label *role = task->role;
59253+ struct acl_subject_label *subj = NULL;
59254+ struct acl_object_label *obj;
59255+ struct file *filp;
59256+ uid_t uid;
59257+ gid_t gid;
59258+
59259+ if (unlikely(!(gr_status & GR_READY)))
59260+ return;
59261+
59262+ uid = GR_GLOBAL_UID(kuid);
59263+ gid = GR_GLOBAL_GID(kgid);
59264+
59265+ filp = task->exec_file;
59266+
59267+ /* kernel process, we'll give them the kernel role */
59268+ if (unlikely(!filp)) {
59269+ task->role = kernel_role;
59270+ task->acl = kernel_role->root_label;
59271+ return;
59272+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
59273+ role = lookup_acl_role_label(task, uid, gid);
59274+
59275+ /* don't change the role if we're not a privileged process */
59276+ if (role && task->role != role &&
59277+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
59278+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
59279+ return;
59280+
59281+ /* perform the subject lookup in the possibly new role;
59282+ we can reuse this result below when role == task->role
59283+ */
59284+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
59285+
59286+ /* if the uid/gid change results in the same role and we're
59287+ using inheritance, don't lose the inherited subject: when
59288+ the current subject differs from what a normal lookup
59289+ would return, we arrived at it via inheritance, so the
59290+ inherited subject is kept
59291+ */
59292+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
59293+ (subj == task->acl)))
59294+ task->acl = subj;
59295+
59296+ task->role = role;
59297+
59298+ task->is_writable = 0;
59299+
59300+ /* ignore additional mmap checks for processes that are writable
59301+ by the default ACL */
59302+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59303+ if (unlikely(obj->mode & GR_WRITE))
59304+ task->is_writable = 1;
59305+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
59306+ if (unlikely(obj->mode & GR_WRITE))
59307+ task->is_writable = 1;
59308+
59309+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59310+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
59311+#endif
59312+
59313+ gr_set_proc_res(task);
59314+
59315+ return;
59316+}
59317+
59318+int
59319+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
59320+ const int unsafe_flags)
59321+{
59322+ struct task_struct *task = current;
59323+ struct acl_subject_label *newacl;
59324+ struct acl_object_label *obj;
59325+ __u32 retmode;
59326+
59327+ if (unlikely(!(gr_status & GR_READY)))
59328+ return 0;
59329+
59330+ newacl = chk_subj_label(dentry, mnt, task->role);
59331+
59332+ /* special handling for the case where an admin role did an strace -f -p <pid>
59333+ and that pid then did an exec
59334+ */
59335+ rcu_read_lock();
59336+ read_lock(&tasklist_lock);
59337+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
59338+ (task->parent->acl->mode & GR_POVERRIDE))) {
59339+ read_unlock(&tasklist_lock);
59340+ rcu_read_unlock();
59341+ goto skip_check;
59342+ }
59343+ read_unlock(&tasklist_lock);
59344+ rcu_read_unlock();
59345+
59346+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
59347+ !(task->role->roletype & GR_ROLE_GOD) &&
59348+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
59349+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
59350+ if (unsafe_flags & LSM_UNSAFE_SHARE)
59351+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
59352+ else
59353+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
59354+ return -EACCES;
59355+ }
59356+
59357+skip_check:
59358+
59359+ obj = chk_obj_label(dentry, mnt, task->acl);
59360+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
59361+
59362+ if (!(task->acl->mode & GR_INHERITLEARN) &&
59363+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
59364+ if (obj->nested)
59365+ task->acl = obj->nested;
59366+ else
59367+ task->acl = newacl;
59368+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
59369+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
59370+
59371+ task->is_writable = 0;
59372+
59373+ /* ignore additional mmap checks for processes that are writable
59374+ by the default ACL */
59375+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
59376+ if (unlikely(obj->mode & GR_WRITE))
59377+ task->is_writable = 1;
59378+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
59379+ if (unlikely(obj->mode & GR_WRITE))
59380+ task->is_writable = 1;
59381+
59382+ gr_set_proc_res(task);
59383+
59384+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59385+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
59386+#endif
59387+ return 0;
59388+}
59389+
59390+/* always called with valid inodev ptr */
59391+static void
59392+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
59393+{
59394+ struct acl_object_label *matchpo;
59395+ struct acl_subject_label *matchps;
59396+ struct acl_subject_label *subj;
59397+ struct acl_role_label *role;
59398+ unsigned int x;
59399+
59400+ FOR_EACH_ROLE_START(role)
59401+ FOR_EACH_SUBJECT_START(role, subj, x)
59402+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
59403+ matchpo->mode |= GR_DELETED;
59404+ FOR_EACH_SUBJECT_END(subj,x)
59405+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
59406+ /* nested subjects aren't in the role's subj_hash table */
59407+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
59408+ matchpo->mode |= GR_DELETED;
59409+ FOR_EACH_NESTED_SUBJECT_END(subj)
59410+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
59411+ matchps->mode |= GR_DELETED;
59412+ FOR_EACH_ROLE_END(role)
59413+
59414+ inodev->nentry->deleted = 1;
59415+
59416+ return;
59417+}
59418+
59419+void
59420+gr_handle_delete(const ino_t ino, const dev_t dev)
59421+{
59422+ struct inodev_entry *inodev;
59423+
59424+ if (unlikely(!(gr_status & GR_READY)))
59425+ return;
59426+
59427+ write_lock(&gr_inode_lock);
59428+ inodev = lookup_inodev_entry(ino, dev);
59429+ if (inodev != NULL)
59430+ do_handle_delete(inodev, ino, dev);
59431+ write_unlock(&gr_inode_lock);
59432+
59433+ return;
59434+}
59435+
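+/*
+ * the update_*() helpers below undo a deletion: when a path named in the
+ * policy reappears, the hash entries that were marked GR_DELETED are
+ * unlinked, rekeyed from the old inode/dev to the new one, and reinserted
+ */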
59436+static void
59437+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
59438+ const ino_t newinode, const dev_t newdevice,
59439+ struct acl_subject_label *subj)
59440+{
59441+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
59442+ struct acl_object_label *match;
59443+
59444+ match = subj->obj_hash[index];
59445+
59446+ while (match && (match->inode != oldinode ||
59447+ match->device != olddevice ||
59448+ !(match->mode & GR_DELETED)))
59449+ match = match->next;
59450+
59451+ if (match && (match->inode == oldinode)
59452+ && (match->device == olddevice)
59453+ && (match->mode & GR_DELETED)) {
59454+ if (match->prev == NULL) {
59455+ subj->obj_hash[index] = match->next;
59456+ if (match->next != NULL)
59457+ match->next->prev = NULL;
59458+ } else {
59459+ match->prev->next = match->next;
59460+ if (match->next != NULL)
59461+ match->next->prev = match->prev;
59462+ }
59463+ match->prev = NULL;
59464+ match->next = NULL;
59465+ match->inode = newinode;
59466+ match->device = newdevice;
59467+ match->mode &= ~GR_DELETED;
59468+
59469+ insert_acl_obj_label(match, subj);
59470+ }
59471+
59472+ return;
59473+}
59474+
59475+static void
59476+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
59477+ const ino_t newinode, const dev_t newdevice,
59478+ struct acl_role_label *role)
59479+{
59480+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
59481+ struct acl_subject_label *match;
59482+
59483+ match = role->subj_hash[index];
59484+
59485+ while (match && (match->inode != oldinode ||
59486+ match->device != olddevice ||
59487+ !(match->mode & GR_DELETED)))
59488+ match = match->next;
59489+
59490+ if (match && (match->inode == oldinode)
59491+ && (match->device == olddevice)
59492+ && (match->mode & GR_DELETED)) {
59493+ if (match->prev == NULL) {
59494+ role->subj_hash[index] = match->next;
59495+ if (match->next != NULL)
59496+ match->next->prev = NULL;
59497+ } else {
59498+ match->prev->next = match->next;
59499+ if (match->next != NULL)
59500+ match->next->prev = match->prev;
59501+ }
59502+ match->prev = NULL;
59503+ match->next = NULL;
59504+ match->inode = newinode;
59505+ match->device = newdevice;
59506+ match->mode &= ~GR_DELETED;
59507+
59508+ insert_acl_subj_label(match, role);
59509+ }
59510+
59511+ return;
59512+}
59513+
59514+static void
59515+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
59516+ const ino_t newinode, const dev_t newdevice)
59517+{
59518+ unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
59519+ struct inodev_entry *match;
59520+
59521+ match = inodev_set.i_hash[index];
59522+
59523+ while (match && (match->nentry->inode != oldinode ||
59524+ match->nentry->device != olddevice || !match->nentry->deleted))
59525+ match = match->next;
59526+
59527+ if (match && (match->nentry->inode == oldinode)
59528+ && (match->nentry->device == olddevice) &&
59529+ match->nentry->deleted) {
59530+ if (match->prev == NULL) {
59531+ inodev_set.i_hash[index] = match->next;
59532+ if (match->next != NULL)
59533+ match->next->prev = NULL;
59534+ } else {
59535+ match->prev->next = match->next;
59536+ if (match->next != NULL)
59537+ match->next->prev = match->prev;
59538+ }
59539+ match->prev = NULL;
59540+ match->next = NULL;
59541+ match->nentry->inode = newinode;
59542+ match->nentry->device = newdevice;
59543+ match->nentry->deleted = 0;
59544+
59545+ insert_inodev_entry(match);
59546+ }
59547+
59548+ return;
59549+}
59550+
59551+static void
59552+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
59553+{
59554+ struct acl_subject_label *subj;
59555+ struct acl_role_label *role;
59556+ unsigned int x;
59557+
59558+ FOR_EACH_ROLE_START(role)
59559+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
59560+
59561+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
59562+ if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
59563+ subj->inode = ino;
59564+ subj->device = dev;
59565+ }
59566+ /* nested subjects aren't in the role's subj_hash table */
59567+ update_acl_obj_label(matchn->inode, matchn->device,
59568+ ino, dev, subj);
59569+ FOR_EACH_NESTED_SUBJECT_END(subj)
59570+ FOR_EACH_SUBJECT_START(role, subj, x)
59571+ update_acl_obj_label(matchn->inode, matchn->device,
59572+ ino, dev, subj);
59573+ FOR_EACH_SUBJECT_END(subj,x)
59574+ FOR_EACH_ROLE_END(role)
59575+
59576+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
59577+
59578+ return;
59579+}
59580+
59581+static void
59582+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
59583+ const struct vfsmount *mnt)
59584+{
59585+ ino_t ino = dentry->d_inode->i_ino;
59586+ dev_t dev = __get_dev(dentry);
59587+
59588+ __do_handle_create(matchn, ino, dev);
59589+
59590+ return;
59591+}
59592+
59593+void
59594+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
59595+{
59596+ struct name_entry *matchn;
59597+
59598+ if (unlikely(!(gr_status & GR_READY)))
59599+ return;
59600+
59601+ preempt_disable();
59602+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
59603+
59604+ if (unlikely((unsigned long)matchn)) {
59605+ write_lock(&gr_inode_lock);
59606+ do_handle_create(matchn, dentry, mnt);
59607+ write_unlock(&gr_inode_lock);
59608+ }
59609+ preempt_enable();
59610+
59611+ return;
59612+}
59613+
59614+void
59615+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
59616+{
59617+ struct name_entry *matchn;
59618+
59619+ if (unlikely(!(gr_status & GR_READY)))
59620+ return;
59621+
59622+ preempt_disable();
59623+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
59624+
59625+ if (unlikely((unsigned long)matchn)) {
59626+ write_lock(&gr_inode_lock);
59627+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
59628+ write_unlock(&gr_inode_lock);
59629+ }
59630+ preempt_enable();
59631+
59632+ return;
59633+}
59634+
59635+void
59636+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59637+ struct dentry *old_dentry,
59638+ struct dentry *new_dentry,
59639+ struct vfsmount *mnt, const __u8 replace)
59640+{
59641+ struct name_entry *matchn;
59642+ struct inodev_entry *inodev;
59643+ struct inode *inode = new_dentry->d_inode;
59644+ ino_t old_ino = old_dentry->d_inode->i_ino;
59645+ dev_t old_dev = __get_dev(old_dentry);
59646+
59647+ /* vfs_rename swaps the name and parent link for old_dentry and
59648+ new_dentry.
59649+ at this point, old_dentry has the new name, parent link, and
59650+ inode for the renamed file.
59651+ if a file is being replaced by the rename, new_dentry has the
59652+ inode and name of the replaced file
59653+ */
59654+
59655+ if (unlikely(!(gr_status & GR_READY)))
59656+ return;
59657+
59658+ preempt_disable();
59659+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
59660+
59661+ /* we wouldn't have to check d_inode if it weren't for
59662+ NFS silly-renaming
59663+ */
59664+
59665+ write_lock(&gr_inode_lock);
59666+ if (unlikely(replace && inode)) {
59667+ ino_t new_ino = inode->i_ino;
59668+ dev_t new_dev = __get_dev(new_dentry);
59669+
59670+ inodev = lookup_inodev_entry(new_ino, new_dev);
59671+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
59672+ do_handle_delete(inodev, new_ino, new_dev);
59673+ }
59674+
59675+ inodev = lookup_inodev_entry(old_ino, old_dev);
59676+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
59677+ do_handle_delete(inodev, old_ino, old_dev);
59678+
59679+ if (unlikely((unsigned long)matchn))
59680+ do_handle_create(matchn, old_dentry, mnt);
59681+
59682+ write_unlock(&gr_inode_lock);
59683+ preempt_enable();
59684+
59685+ return;
59686+}
59687+
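+/*
+ * authentication lookup for special roles: the role must be reachable via
+ * the current role's transition table and, if it restricts source ips,
+ * the current ip must match.  nopw/PAM roles return with a NULL salt and
+ * sum; password roles return the stored salt and hash for chkpw()
+ */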
59688+static int
59689+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
59690+ unsigned char **sum)
59691+{
59692+ struct acl_role_label *r;
59693+ struct role_allowed_ip *ipp;
59694+ struct role_transition *trans;
59695+ unsigned int i;
59696+ int found = 0;
59697+ u32 curr_ip = current->signal->curr_ip;
59698+
59699+ current->signal->saved_ip = curr_ip;
59700+
59701+ /* check transition table */
59702+
59703+ for (trans = current->role->transitions; trans; trans = trans->next) {
59704+ if (!strcmp(rolename, trans->rolename)) {
59705+ found = 1;
59706+ break;
59707+ }
59708+ }
59709+
59710+ if (!found)
59711+ return 0;
59712+
59713+ /* handle special roles that do not require authentication,
59714+ and check the source ip */
59715+
59716+ FOR_EACH_ROLE_START(r)
59717+ if (!strcmp(rolename, r->rolename) &&
59718+ (r->roletype & GR_ROLE_SPECIAL)) {
59719+ found = 0;
59720+ if (r->allowed_ips != NULL) {
59721+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
59722+ if ((ntohl(curr_ip) & ipp->netmask) ==
59723+ (ntohl(ipp->addr) & ipp->netmask))
59724+ found = 1;
59725+ }
59726+ } else
59727+ found = 2;
59728+ if (!found)
59729+ return 0;
59730+
59731+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
59732+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
59733+ *salt = NULL;
59734+ *sum = NULL;
59735+ return 1;
59736+ }
59737+ }
59738+ FOR_EACH_ROLE_END(r)
59739+
59740+ for (i = 0; i < num_sprole_pws; i++) {
59741+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
59742+ *salt = acl_special_roles[i]->salt;
59743+ *sum = acl_special_roles[i]->sum;
59744+ return 1;
59745+ }
59746+ }
59747+
59748+ return 0;
59749+}
59750+
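+/*
+ * apply an authenticated special role to the parent of the requesting
+ * process (typically the shell that invoked the admin utility), looking
+ * up the matching subject for its executable
+ */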
59751+static void
59752+assign_special_role(char *rolename)
59753+{
59754+ struct acl_object_label *obj;
59755+ struct acl_role_label *r;
59756+ struct acl_role_label *assigned = NULL;
59757+ struct task_struct *tsk;
59758+ struct file *filp;
59759+
59760+ FOR_EACH_ROLE_START(r)
59761+ if (!strcmp(rolename, r->rolename) &&
59762+ (r->roletype & GR_ROLE_SPECIAL)) {
59763+ assigned = r;
59764+ break;
59765+ }
59766+ FOR_EACH_ROLE_END(r)
59767+
59768+ if (!assigned)
59769+ return;
59770+
59771+ read_lock(&tasklist_lock);
59772+ read_lock(&grsec_exec_file_lock);
59773+
59774+ tsk = current->real_parent;
59775+ if (tsk == NULL)
59776+ goto out_unlock;
59777+
59778+ filp = tsk->exec_file;
59779+ if (filp == NULL)
59780+ goto out_unlock;
59781+
59782+ tsk->is_writable = 0;
59783+
59784+ tsk->acl_sp_role = 1;
59785+ tsk->acl_role_id = ++acl_sp_role_value;
59786+ tsk->role = assigned;
59787+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
59788+
59789+ /* ignore additional mmap checks for processes that are writable
59790+ by the default ACL */
59791+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59792+ if (unlikely(obj->mode & GR_WRITE))
59793+ tsk->is_writable = 1;
59794+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
59795+ if (unlikely(obj->mode & GR_WRITE))
59796+ tsk->is_writable = 1;
59797+
59798+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59799+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
59800+#endif
59801+
59802+out_unlock:
59803+ read_unlock(&grsec_exec_file_lock);
59804+ read_unlock(&tasklist_lock);
59805+ return;
59806+}
59807+
59808+int gr_check_secure_terminal(struct task_struct *task)
59809+{
59810+ struct task_struct *p, *p2, *p3;
59811+ struct files_struct *files;
59812+ struct fdtable *fdt;
59813+ struct file *our_file = NULL, *file;
59814+ int i;
59815+
59816+ if (task->signal->tty == NULL)
59817+ return 1;
59818+
59819+ files = get_files_struct(task);
59820+ if (files != NULL) {
59821+ rcu_read_lock();
59822+ fdt = files_fdtable(files);
59823+ for (i = 0; i < fdt->max_fds; i++) {
59824+ file = fcheck_files(files, i);
59825+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
59826+ get_file(file);
59827+ our_file = file;
59828+ }
59829+ }
59830+ rcu_read_unlock();
59831+ put_files_struct(files);
59832+ }
59833+
59834+ if (our_file == NULL)
59835+ return 1;
59836+
59837+ read_lock(&tasklist_lock);
59838+ do_each_thread(p2, p) {
59839+ files = get_files_struct(p);
59840+ if (files == NULL ||
59841+ (p->signal && p->signal->tty == task->signal->tty)) {
59842+ if (files != NULL)
59843+ put_files_struct(files);
59844+ continue;
59845+ }
59846+ rcu_read_lock();
59847+ fdt = files_fdtable(files);
59848+ for (i = 0; i < fdt->max_fds; i++) {
59849+ file = fcheck_files(files, i);
59850+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
59851+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
59852+ p3 = task;
59853+ while (task_pid_nr(p3) > 0) {
59854+ if (p3 == p)
59855+ break;
59856+ p3 = p3->real_parent;
59857+ }
59858+ if (p3 == p)
59859+ break;
59860+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
59861+ gr_handle_alertkill(p);
59862+ rcu_read_unlock();
59863+ put_files_struct(files);
59864+ read_unlock(&tasklist_lock);
59865+ fput(our_file);
59866+ return 0;
59867+ }
59868+ }
59869+ rcu_read_unlock();
59870+ put_files_struct(files);
59871+ } while_each_thread(p2, p);
59872+ read_unlock(&tasklist_lock);
59873+
59874+ fput(our_file);
59875+ return 1;
59876+}
59877+
59878+static int gr_rbac_disable(void *unused)
59879+{
59880+ pax_open_kernel();
59881+ gr_status &= ~GR_READY;
59882+ pax_close_kernel();
59883+
59884+ return 0;
59885+}
59886+
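+/*
+ * write() handler for the RBAC control device used by the admin utility:
+ * validates the gr_arg_wrapper, enforces the authentication lockout
+ * (CONFIG_GRKERNSEC_ACL_MAXTRIES tries, then locked until the timeout
+ * expires), and dispatches the requested mode (status, enable, reload,
+ * shutdown, segvmod, special role (un)assignment)
+ */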
59887+ssize_t
59888+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
59889+{
59890+ struct gr_arg_wrapper uwrap;
59891+ unsigned char *sprole_salt = NULL;
59892+ unsigned char *sprole_sum = NULL;
59893+ int error = sizeof (struct gr_arg_wrapper);
59894+ int error2 = 0;
59895+
59896+ mutex_lock(&gr_dev_mutex);
59897+
59898+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
59899+ error = -EPERM;
59900+ goto out;
59901+ }
59902+
59903+ if (count != sizeof (struct gr_arg_wrapper)) {
59904+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
59905+ error = -EINVAL;
59906+ goto out;
59907+ }
59908+
59910+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
59911+ gr_auth_expires = 0;
59912+ gr_auth_attempts = 0;
59913+ }
59914+
59915+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
59916+ error = -EFAULT;
59917+ goto out;
59918+ }
59919+
59920+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
59921+ error = -EINVAL;
59922+ goto out;
59923+ }
59924+
59925+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
59926+ error = -EFAULT;
59927+ goto out;
59928+ }
59929+
59930+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59931+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59932+ time_after(gr_auth_expires, get_seconds())) {
59933+ error = -EBUSY;
59934+ goto out;
59935+ }
59936+
59937+ /* if a non-root user is trying to do anything other than use a
59938+ special role, do not attempt authentication and do not count
59939+ the attempt towards the authentication lockout
59940+ */
59941+
59942+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
59943+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59944+ gr_is_global_nonroot(current_uid())) {
59945+ error = -EPERM;
59946+ goto out;
59947+ }
59948+
59949+ /* ensure pw and special role name are null terminated */
59950+
59951+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
59952+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
59953+
59954+ /* Okay.
59955+ * We now have enough of the argument structure (we have yet
59956+ * to copy_from_user the tables themselves). Copy the tables
59957+ * only if we need them, i.e. for loading operations. */
59958+
59959+ switch (gr_usermode->mode) {
59960+ case GR_STATUS:
59961+ if (gr_status & GR_READY) {
59962+ error = 1;
59963+ if (!gr_check_secure_terminal(current))
59964+ error = 3;
59965+ } else
59966+ error = 2;
59967+ goto out;
59968+ case GR_SHUTDOWN:
59969+ if ((gr_status & GR_READY)
59970+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59971+ stop_machine(gr_rbac_disable, NULL, NULL);
59972+ free_variables();
59973+ memset(gr_usermode, 0, sizeof (struct gr_arg));
59974+ memset(gr_system_salt, 0, GR_SALT_LEN);
59975+ memset(gr_system_sum, 0, GR_SHA_LEN);
59976+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
59977+ } else if (gr_status & GR_READY) {
59978+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
59979+ error = -EPERM;
59980+ } else {
59981+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
59982+ error = -EAGAIN;
59983+ }
59984+ break;
59985+ case GR_ENABLE:
59986+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
59987+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
59988+ else {
59989+ if (gr_status & GR_READY)
59990+ error = -EAGAIN;
59991+ else
59992+ error = error2;
59993+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
59994+ }
59995+ break;
59996+ case GR_RELOAD:
59997+ if (!(gr_status & GR_READY)) {
59998+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
59999+ error = -EAGAIN;
60000+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
60001+ stop_machine(gr_rbac_disable, NULL, NULL);
60002+ free_variables();
60003+ error2 = gracl_init(gr_usermode);
60004+ if (!error2)
60005+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
60006+ else {
60007+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
60008+ error = error2;
60009+ }
60010+ } else {
60011+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
60012+ error = -EPERM;
60013+ }
60014+ break;
60015+ case GR_SEGVMOD:
60016+ if (unlikely(!(gr_status & GR_READY))) {
60017+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
60018+ error = -EAGAIN;
60019+ break;
60020+ }
60021+
60022+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
60023+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
60024+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
60025+ struct acl_subject_label *segvacl;
60026+ segvacl =
60027+ lookup_acl_subj_label(gr_usermode->segv_inode,
60028+ gr_usermode->segv_device,
60029+ current->role);
60030+ if (segvacl) {
60031+ segvacl->crashes = 0;
60032+ segvacl->expires = 0;
60033+ }
60034+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
60035+ gr_remove_uid(gr_usermode->segv_uid);
60036+ }
60037+ } else {
60038+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
60039+ error = -EPERM;
60040+ }
60041+ break;
60042+ case GR_SPROLE:
60043+ case GR_SPROLEPAM:
60044+ if (unlikely(!(gr_status & GR_READY))) {
60045+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
60046+ error = -EAGAIN;
60047+ break;
60048+ }
60049+
60050+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
60051+ current->role->expires = 0;
60052+ current->role->auth_attempts = 0;
60053+ }
60054+
60055+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
60056+ time_after(current->role->expires, get_seconds())) {
60057+ error = -EBUSY;
60058+ goto out;
60059+ }
60060+
60061+ if (lookup_special_role_auth
60062+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
60063+ && ((!sprole_salt && !sprole_sum)
60064+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
60065+ char *p = "";
60066+ assign_special_role(gr_usermode->sp_role);
60067+ read_lock(&tasklist_lock);
60068+ if (current->real_parent)
60069+ p = current->real_parent->role->rolename;
60070+ read_unlock(&tasklist_lock);
60071+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
60072+ p, acl_sp_role_value);
60073+ } else {
60074+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
60075+ error = -EPERM;
60076+ if (!(current->role->auth_attempts++))
60077+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
60078+
60079+ goto out;
60080+ }
60081+ break;
60082+ case GR_UNSPROLE:
60083+ if (unlikely(!(gr_status & GR_READY))) {
60084+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
60085+ error = -EAGAIN;
60086+ break;
60087+ }
60088+
60089+ if (current->role->roletype & GR_ROLE_SPECIAL) {
60090+ char *p = "";
60091+ int i = 0;
60092+
60093+ read_lock(&tasklist_lock);
60094+ if (current->real_parent) {
60095+ p = current->real_parent->role->rolename;
60096+ i = current->real_parent->acl_role_id;
60097+ }
60098+ read_unlock(&tasklist_lock);
60099+
60100+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
60101+ gr_set_acls(1);
60102+ } else {
60103+ error = -EPERM;
60104+ goto out;
60105+ }
60106+ break;
60107+ default:
60108+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
60109+ error = -EINVAL;
60110+ break;
60111+ }
60112+
60113+ if (error != -EPERM)
60114+ goto out;
60115+
60116+ if (!(gr_auth_attempts++))
60117+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
60118+
60119+ out:
60120+ mutex_unlock(&gr_dev_mutex);
60121+ return error;
60122+}
60123+
60124+/* must be called with
60125+ rcu_read_lock();
60126+ read_lock(&tasklist_lock);
60127+ read_lock(&grsec_exec_file_lock);
60128+*/
60129+int gr_apply_subject_to_task(struct task_struct *task)
60130+{
60131+ struct acl_object_label *obj;
60132+ char *tmpname;
60133+ struct acl_subject_label *tmpsubj;
60134+ struct file *filp;
60135+ struct name_entry *nmatch;
60136+
60137+ filp = task->exec_file;
60138+ if (filp == NULL)
60139+ return 0;
60140+
60141+ /* the following applies the correct subject to
60142+ binaries that were already running when the
60143+ RBAC system was enabled, and that have been
60144+ replaced or deleted since they were executed
60145+ -----
60146+ when the RBAC system starts, the inode/dev
60147+ from exec_file will be one the RBAC system
60148+ is unaware of: it only knows the inode/dev
60149+ of the file currently on disk, or the
60150+ absence of it
60151+ */
60152+ preempt_disable();
60153+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
60154+
60155+ nmatch = lookup_name_entry(tmpname);
60156+ preempt_enable();
60157+ tmpsubj = NULL;
60158+ if (nmatch) {
60159+ if (nmatch->deleted)
60160+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
60161+ else
60162+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
60163+ if (tmpsubj != NULL)
60164+ task->acl = tmpsubj;
60165+ }
60166+ if (tmpsubj == NULL)
60167+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
60168+ task->role);
60169+ if (task->acl) {
60170+ task->is_writable = 0;
60171+ /* ignore additional mmap checks for processes that are writable
60172+ by the default ACL */
60173+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60174+ if (unlikely(obj->mode & GR_WRITE))
60175+ task->is_writable = 1;
60176+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
60177+ if (unlikely(obj->mode & GR_WRITE))
60178+ task->is_writable = 1;
60179+
60180+ gr_set_proc_res(task);
60181+
60182+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60183+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
60184+#endif
60185+ } else {
60186+ return 1;
60187+ }
60188+
60189+ return 0;
60190+}
60191+
60192+int
60193+gr_set_acls(const int type)
60194+{
60195+ struct task_struct *task, *task2;
60196+ struct acl_role_label *role = current->role;
60197+ __u16 acl_role_id = current->acl_role_id;
60198+ const struct cred *cred;
60199+ int ret;
60200+
60201+ rcu_read_lock();
60202+ read_lock(&tasklist_lock);
60203+ read_lock(&grsec_exec_file_lock);
60204+ do_each_thread(task2, task) {
60205+ /* check to see if we're called from the exit handler;
60206+ if so, only replace ACLs that have inherited the admin
60207+ ACL */
60208+
60209+ if (type && (task->role != role ||
60210+ task->acl_role_id != acl_role_id))
60211+ continue;
60212+
60213+ task->acl_role_id = 0;
60214+ task->acl_sp_role = 0;
60215+
60216+ if (task->exec_file) {
60217+ cred = __task_cred(task);
60218+ task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
60219+ ret = gr_apply_subject_to_task(task);
60220+ if (ret) {
60221+ read_unlock(&grsec_exec_file_lock);
60222+ read_unlock(&tasklist_lock);
60223+ rcu_read_unlock();
60224+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
60225+ return ret;
60226+ }
60227+ } else {
60228+ // it's a kernel process
60229+ task->role = kernel_role;
60230+ task->acl = kernel_role->root_label;
60231+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
60232+ task->acl->mode &= ~GR_PROCFIND;
60233+#endif
60234+ }
60235+ } while_each_thread(task2, task);
60236+ read_unlock(&grsec_exec_file_lock);
60237+ read_unlock(&tasklist_lock);
60238+ rcu_read_unlock();
60239+
60240+ return 0;
60241+}
60242+
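+/*
+ * headroom added on top of an observed resource request while learning,
+ * so the generated limits don't sit exactly at the high-water mark
+ */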
60243+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
60244+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
60245+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
60246+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
60247+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
60248+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
60249+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
60250+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
60251+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
60252+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
60253+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
60254+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
60255+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
60256+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
60257+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
60258+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
60259+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
60260+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
60261+};
60262+
60263+void
60264+gr_learn_resource(const struct task_struct *task,
60265+ const int res, const unsigned long wanted, const int gt)
60266+{
60267+ struct acl_subject_label *acl;
60268+ const struct cred *cred;
60269+
60270+ if (unlikely((gr_status & GR_READY) &&
60271+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
60272+ goto skip_reslog;
60273+
60274+ gr_log_resource(task, res, wanted, gt);
60275+skip_reslog:
60276+
60277+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
60278+ return;
60279+
60280+ acl = task->acl;
60281+
60282+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
60283+ !(acl->resmask & (1 << (unsigned short) res))))
60284+ return;
60285+
60286+ if (wanted >= acl->res[res].rlim_cur) {
60287+ unsigned long res_add;
60288+
60289+ res_add = wanted + res_learn_bumps[res];
60290+
60291+ acl->res[res].rlim_cur = res_add;
60292+
60293+ if (wanted > acl->res[res].rlim_max)
60294+ acl->res[res].rlim_max = res_add;
60295+
60296+ /* only log the subject filename, since resource logging is supported for
60297+ single-subject learning only */
60298+ rcu_read_lock();
60299+ cred = __task_cred(task);
60300+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60301+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
60302+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
60303+ "", (unsigned long) res, &task->signal->saved_ip);
60304+ rcu_read_unlock();
60305+ }
60306+
60307+ return;
60308+}
60309+EXPORT_SYMBOL(gr_learn_resource);
60310+#endif
60311+
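+/*
+ * apply per-subject PaX flag overrides at exec time: the subject's
+ * GR_PAX_{DISABLE,ENABLE}_* bits clear or set the corresponding
+ * MF_PAX_* flags on the task
+ */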
60312+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
60313+void
60314+pax_set_initial_flags(struct linux_binprm *bprm)
60315+{
60316+ struct task_struct *task = current;
60317+ struct acl_subject_label *proc;
60318+ unsigned long flags;
60319+
60320+ if (unlikely(!(gr_status & GR_READY)))
60321+ return;
60322+
60323+ flags = pax_get_flags(task);
60324+
60325+ proc = task->acl;
60326+
60327+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
60328+ flags &= ~MF_PAX_PAGEEXEC;
60329+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
60330+ flags &= ~MF_PAX_SEGMEXEC;
60331+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
60332+ flags &= ~MF_PAX_RANDMMAP;
60333+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
60334+ flags &= ~MF_PAX_EMUTRAMP;
60335+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
60336+ flags &= ~MF_PAX_MPROTECT;
60337+
60338+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
60339+ flags |= MF_PAX_PAGEEXEC;
60340+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
60341+ flags |= MF_PAX_SEGMEXEC;
60342+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
60343+ flags |= MF_PAX_RANDMMAP;
60344+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
60345+ flags |= MF_PAX_EMUTRAMP;
60346+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
60347+ flags |= MF_PAX_MPROTECT;
60348+
60349+ pax_set_flags(task, flags);
60350+
60351+ return;
60352+}
60353+#endif
60354+
60355+int
60356+gr_handle_proc_ptrace(struct task_struct *task)
60357+{
60358+ struct file *filp;
60359+ struct task_struct *tmp = task;
60360+ struct task_struct *curtemp = current;
60361+ __u32 retmode;
60362+
60363+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60364+ if (unlikely(!(gr_status & GR_READY)))
60365+ return 0;
60366+#endif
60367+
60368+ read_lock(&tasklist_lock);
60369+ read_lock(&grsec_exec_file_lock);
60370+ filp = task->exec_file;
60371+
60372+ while (task_pid_nr(tmp) > 0) {
60373+ if (tmp == curtemp)
60374+ break;
60375+ tmp = tmp->real_parent;
60376+ }
60377+
60378+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
60379+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
60380+ read_unlock(&grsec_exec_file_lock);
60381+ read_unlock(&tasklist_lock);
60382+ return 1;
60383+ }
60384+
60385+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60386+ if (!(gr_status & GR_READY)) {
60387+ read_unlock(&grsec_exec_file_lock);
60388+ read_unlock(&tasklist_lock);
60389+ return 0;
60390+ }
60391+#endif
60392+
60393+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
60394+ read_unlock(&grsec_exec_file_lock);
60395+ read_unlock(&tasklist_lock);
60396+
60397+ if (retmode & GR_NOPTRACE)
60398+ return 1;
60399+
60400+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
60401+ && (current->acl != task->acl || (current->acl != current->role->root_label
60402+ && task_pid_nr(current) != task_pid_nr(task))))
60403+ return 1;
60404+
60405+ return 0;
60406+}
60407+
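/*
 * Illustrative sketch, not part of the patch: both ptrace handlers walk
 * real_parent until they either meet the caller or fall off the top of
 * the tree (a task with pid 0), i.e. they test whether current is an
 * ancestor of the target.  Toy task struct, userspace only.
 */
#include <stdio.h>
#include <stddef.h>

struct task { int pid; struct task *real_parent; };

static int is_ancestor(struct task *task, struct task *ancestor)
{
	struct task *tmp = task;

	while (tmp->pid > 0) {
		if (tmp == ancestor)
			return 1;
		tmp = tmp->real_parent;
	}
	return 0;
}

int main(void)
{
	struct task idle  = { 0, NULL };
	struct task init  = { 1, &idle };
	struct task shell = { 42, &init };

	printf("%d\n", is_ancestor(&shell, &init));	/* 1 */
	printf("%d\n", is_ancestor(&init, &shell));	/* 0 */
	return 0;
}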
60408+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
60409+{
60410+ if (unlikely(!(gr_status & GR_READY)))
60411+ return;
60412+
60413+ if (!(current->role->roletype & GR_ROLE_GOD))
60414+ return;
60415+
60416+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
60417+ p->role->rolename, gr_task_roletype_to_char(p),
60418+ p->acl->filename);
60419+}
60420+
60421+int
60422+gr_handle_ptrace(struct task_struct *task, const long request)
60423+{
60424+ struct task_struct *tmp = task;
60425+ struct task_struct *curtemp = current;
60426+ __u32 retmode;
60427+
60428+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
60429+ if (unlikely(!(gr_status & GR_READY)))
60430+ return 0;
60431+#endif
60432+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
60433+ read_lock(&tasklist_lock);
60434+ while (task_pid_nr(tmp) > 0) {
60435+ if (tmp == curtemp)
60436+ break;
60437+ tmp = tmp->real_parent;
60438+ }
60439+
60440+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
60441+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
60442+ read_unlock(&tasklist_lock);
60443+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60444+ return 1;
60445+ }
60446+ read_unlock(&tasklist_lock);
60447+ }
60448+
60449+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
60450+ if (!(gr_status & GR_READY))
60451+ return 0;
60452+#endif
60453+
60454+ read_lock(&grsec_exec_file_lock);
60455+ if (unlikely(!task->exec_file)) {
60456+ read_unlock(&grsec_exec_file_lock);
60457+ return 0;
60458+ }
60459+
60460+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
60461+ read_unlock(&grsec_exec_file_lock);
60462+
60463+ if (retmode & GR_NOPTRACE) {
60464+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60465+ return 1;
60466+ }
60467+
60468+ if (retmode & GR_PTRACERD) {
60469+ switch (request) {
60470+ case PTRACE_SEIZE:
60471+ case PTRACE_POKETEXT:
60472+ case PTRACE_POKEDATA:
60473+ case PTRACE_POKEUSR:
60474+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
60475+ case PTRACE_SETREGS:
60476+ case PTRACE_SETFPREGS:
60477+#endif
60478+#ifdef CONFIG_X86
60479+ case PTRACE_SETFPXREGS:
60480+#endif
60481+#ifdef CONFIG_ALTIVEC
60482+ case PTRACE_SETVRREGS:
60483+#endif
60484+ return 1;
60485+ default:
60486+ return 0;
60487+ }
60488+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
60489+ !(current->role->roletype & GR_ROLE_GOD) &&
60490+ (current->acl != task->acl)) {
60491+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
60492+ return 1;
60493+ }
60494+
60495+ return 0;
60496+}
60497+
60498+static int is_writable_mmap(const struct file *filp)
60499+{
60500+ struct task_struct *task = current;
60501+ struct acl_object_label *obj, *obj2;
60502+
60503+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
60504+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
60505+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60506+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
60507+ task->role->root_label);
60508+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
60509+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
60510+ return 1;
60511+ }
60512+ }
60513+ return 0;
60514+}
60515+
60516+int
60517+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
60518+{
60519+ __u32 mode;
60520+
60521+ if (unlikely(!file || !(prot & PROT_EXEC)))
60522+ return 1;
60523+
60524+ if (is_writable_mmap(file))
60525+ return 0;
60526+
60527+ mode =
60528+ gr_search_file(file->f_path.dentry,
60529+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60530+ file->f_path.mnt);
60531+
60532+ if (!gr_tpe_allow(file))
60533+ return 0;
60534+
60535+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60536+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60537+ return 0;
60538+ } else if (unlikely(!(mode & GR_EXEC))) {
60539+ return 0;
60540+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60541+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60542+ return 1;
60543+ }
60544+
60545+ return 1;
60546+}
60547+
60548+int
60549+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
60550+{
60551+ __u32 mode;
60552+
60553+ if (unlikely(!file || !(prot & PROT_EXEC)))
60554+ return 1;
60555+
60556+ if (is_writable_mmap(file))
60557+ return 0;
60558+
60559+ mode =
60560+ gr_search_file(file->f_path.dentry,
60561+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60562+ file->f_path.mnt);
60563+
60564+ if (!gr_tpe_allow(file))
60565+ return 0;
60566+
60567+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60568+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60569+ return 0;
60570+ } else if (unlikely(!(mode & GR_EXEC))) {
60571+ return 0;
60572+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60573+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60574+ return 1;
60575+ }
60576+
60577+ return 1;
60578+}
60579+
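/*
 * Illustrative sketch, not part of the patch: gr_acl_handle_mmap() and
 * gr_acl_handle_mprotect() share one four-way decision on the returned
 * mode bits, restated compactly below.  The GR_ values are placeholders.
 */
#include <stdio.h>

#define GR_EXEC       0x1
#define GR_AUDIT_EXEC 0x2
#define GR_SUPPRESS   0x4

/* 1 = allow, 0 = deny; *log tells the caller whether to emit a message */
static int exec_mapping_allowed(unsigned int mode, int *log)
{
	if (!(mode & GR_EXEC)) {
		*log = !(mode & GR_SUPPRESS);	/* denial, logged unless suppressed */
		return 0;
	}
	*log = !!(mode & GR_AUDIT_EXEC);	/* allowed, optionally audited */
	return 1;
}

int main(void)
{
	int log;

	printf("%d\n", exec_mapping_allowed(GR_EXEC | GR_AUDIT_EXEC, &log)); /* 1, log=1 */
	printf("%d\n", exec_mapping_allowed(GR_SUPPRESS, &log));             /* 0, log=0 */
	return 0;
}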
60580+void
60581+gr_acl_handle_psacct(struct task_struct *task, const long code)
60582+{
60583+ unsigned long runtime;
60584+ unsigned long cputime;
60585+ unsigned int wday, cday;
60586+ __u8 whr, chr;
60587+ __u8 wmin, cmin;
60588+ __u8 wsec, csec;
60589+ struct timespec timeval;
60590+
60591+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
60592+ !(task->acl->mode & GR_PROCACCT)))
60593+ return;
60594+
60595+ do_posix_clock_monotonic_gettime(&timeval);
60596+ runtime = timeval.tv_sec - task->start_time.tv_sec;
60597+ wday = runtime / (3600 * 24);
60598+ runtime -= wday * (3600 * 24);
60599+ whr = runtime / 3600;
60600+ runtime -= whr * 3600;
60601+ wmin = runtime / 60;
60602+ runtime -= wmin * 60;
60603+ wsec = runtime;
60604+
60605+ cputime = (task->utime + task->stime) / HZ;
60606+ cday = cputime / (3600 * 24);
60607+ cputime -= cday * (3600 * 24);
60608+ chr = cputime / 3600;
60609+ cputime -= chr * 3600;
60610+ cmin = cputime / 60;
60611+ cputime -= cmin * 60;
60612+ csec = cputime;
60613+
60614+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
60615+
60616+ return;
60617+}
60618+
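/*
 * Worked example, not part of the patch: the wall-clock/CPU split above
 * is plain integer arithmetic on a seconds count.
 */
#include <stdio.h>

int main(void)
{
	unsigned long runtime = 93784;	/* 1 day, 2 h, 3 min, 4 s */
	unsigned int wday = runtime / (3600 * 24);

	runtime -= wday * (3600 * 24);
	unsigned char whr = runtime / 3600;
	runtime -= whr * 3600;
	unsigned char wmin = runtime / 60;
	runtime -= wmin * 60;
	unsigned char wsec = runtime;

	printf("%ud %uh %um %us\n", wday,
	       (unsigned)whr, (unsigned)wmin, (unsigned)wsec);	/* 1d 2h 3m 4s */
	return 0;
}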
60619+void gr_set_kernel_label(struct task_struct *task)
60620+{
60621+ if (gr_status & GR_READY) {
60622+ task->role = kernel_role;
60623+ task->acl = kernel_role->root_label;
60624+ }
60625+ return;
60626+}
60627+
60628+#ifdef CONFIG_TASKSTATS
60629+int gr_is_taskstats_denied(int pid)
60630+{
60631+ struct task_struct *task;
60632+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60633+ const struct cred *cred;
60634+#endif
60635+ int ret = 0;
60636+
60637+ /* restrict taskstats viewing to un-chrooted root users
60638+ who have the 'view' subject flag if the RBAC system is enabled
60639+ */
60640+
60641+ rcu_read_lock();
60642+ read_lock(&tasklist_lock);
60643+ task = find_task_by_vpid(pid);
60644+ if (task) {
60645+#ifdef CONFIG_GRKERNSEC_CHROOT
60646+ if (proc_is_chrooted(task))
60647+ ret = -EACCES;
60648+#endif
60649+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60650+ cred = __task_cred(task);
60651+#ifdef CONFIG_GRKERNSEC_PROC_USER
60652+ if (gr_is_global_nonroot(cred->uid))
60653+ ret = -EACCES;
60654+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60655+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
60656+ ret = -EACCES;
60657+#endif
60658+#endif
60659+ if (gr_status & GR_READY) {
60660+ if (!(task->acl->mode & GR_VIEW))
60661+ ret = -EACCES;
60662+ }
60663+ } else
60664+ ret = -ENOENT;
60665+
60666+ read_unlock(&tasklist_lock);
60667+ rcu_read_unlock();
60668+
60669+ return ret;
60670+}
60671+#endif
60672+
60673+/* AUXV entries are filled via a descendant of search_binary_handler
60674+ after we've already applied the subject for the target
60675+*/
60676+int gr_acl_enable_at_secure(void)
60677+{
60678+ if (unlikely(!(gr_status & GR_READY)))
60679+ return 0;
60680+
60681+ if (current->acl->mode & GR_ATSECURE)
60682+ return 1;
60683+
60684+ return 0;
60685+}
60686+
60687+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
60688+{
60689+ struct task_struct *task = current;
60690+ struct dentry *dentry = file->f_path.dentry;
60691+ struct vfsmount *mnt = file->f_path.mnt;
60692+ struct acl_object_label *obj, *tmp;
60693+ struct acl_subject_label *subj;
60694+ unsigned int bufsize;
60695+ int is_not_root;
60696+ char *path;
60697+ dev_t dev = __get_dev(dentry);
60698+
60699+ if (unlikely(!(gr_status & GR_READY)))
60700+ return 1;
60701+
60702+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60703+ return 1;
60704+
60705+	/* ignore Eric Biederman: IS_PRIVATE (fs-internal) inodes are exempt from these checks */
60706+ if (IS_PRIVATE(dentry->d_inode))
60707+ return 1;
60708+
60709+ subj = task->acl;
60710+ read_lock(&gr_inode_lock);
60711+ do {
60712+ obj = lookup_acl_obj_label(ino, dev, subj);
60713+ if (obj != NULL) {
60714+ read_unlock(&gr_inode_lock);
60715+ return (obj->mode & GR_FIND) ? 1 : 0;
60716+ }
60717+ } while ((subj = subj->parent_subject));
60718+ read_unlock(&gr_inode_lock);
60719+
60720+	/* this is purely an optimization, since we're looking for an object
60721+	   for the directory we're doing a readdir on.
60722+	   If it's possible for any globbed object to match the entry we're
60723+	   filling into the directory, then the object we find here will be
60724+	   an anchor point with attached globbed objects
60725+	*/
60726+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
60727+ if (obj->globbed == NULL)
60728+ return (obj->mode & GR_FIND) ? 1 : 0;
60729+
60730+ is_not_root = ((obj->filename[0] == '/') &&
60731+ (obj->filename[1] == '\0')) ? 0 : 1;
60732+ bufsize = PAGE_SIZE - namelen - is_not_root;
60733+
60734+	/* unsigned-wrap check: true when bufsize > PAGE_SIZE or bufsize == 0 */
60735+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
60736+ return 1;
60737+
60738+ preempt_disable();
60739+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
60740+ bufsize);
60741+
60742+ bufsize = strlen(path);
60743+
60744+ /* if base is "/", don't append an additional slash */
60745+ if (is_not_root)
60746+ *(path + bufsize) = '/';
60747+ memcpy(path + bufsize + is_not_root, name, namelen);
60748+ *(path + bufsize + namelen + is_not_root) = '\0';
60749+
60750+ tmp = obj->globbed;
60751+ while (tmp) {
60752+ if (!glob_match(tmp->filename, path)) {
60753+ preempt_enable();
60754+ return (tmp->mode & GR_FIND) ? 1 : 0;
60755+ }
60756+ tmp = tmp->next;
60757+ }
60758+ preempt_enable();
60759+ return (obj->mode & GR_FIND) ? 1 : 0;
60760+}
60761+
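/*
 * Illustrative sketch, not part of the patch: the glob path built above
 * is the directory's canonical path plus the candidate entry name, with
 * a separating slash unless the base is "/".  A fixed buffer stands in
 * for the per-cpu page used by d_real_path().
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char path[256] = "/var/log";	/* stand-in for the d_real_path() result */
	const char *name = "messages";
	size_t namelen = strlen(name);
	size_t bufsize = strlen(path);
	int is_not_root = !(path[0] == '/' && path[1] == '\0');

	/* if base is "/", don't append an additional slash */
	if (is_not_root)
		path[bufsize] = '/';
	memcpy(path + bufsize + is_not_root, name, namelen);
	path[bufsize + namelen + is_not_root] = '\0';

	printf("%s\n", path);	/* /var/log/messages */
	return 0;
}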
60762+void gr_put_exec_file(struct task_struct *task)
60763+{
60764+ struct file *filp;
60765+
60766+ write_lock(&grsec_exec_file_lock);
60767+ filp = task->exec_file;
60768+ task->exec_file = NULL;
60769+ write_unlock(&grsec_exec_file_lock);
60770+
60771+ if (filp)
60772+ fput(filp);
60773+
60774+ return;
60775+}
60776+
60777+
60778+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
60779+EXPORT_SYMBOL(gr_acl_is_enabled);
60780+#endif
60781+EXPORT_SYMBOL(gr_set_kernel_label);
60782+#ifdef CONFIG_SECURITY
60783+EXPORT_SYMBOL(gr_check_user_change);
60784+EXPORT_SYMBOL(gr_check_group_change);
60785+#endif
60786+
60787diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
60788new file mode 100644
60789index 0000000..34fefda
60790--- /dev/null
60791+++ b/grsecurity/gracl_alloc.c
60792@@ -0,0 +1,105 @@
60793+#include <linux/kernel.h>
60794+#include <linux/mm.h>
60795+#include <linux/slab.h>
60796+#include <linux/vmalloc.h>
60797+#include <linux/gracl.h>
60798+#include <linux/grsecurity.h>
60799+
60800+static unsigned long alloc_stack_next = 1;
60801+static unsigned long alloc_stack_size = 1;
60802+static void **alloc_stack;
60803+
60804+static __inline__ int
60805+alloc_pop(void)
60806+{
60807+ if (alloc_stack_next == 1)
60808+ return 0;
60809+
60810+ kfree(alloc_stack[alloc_stack_next - 2]);
60811+
60812+ alloc_stack_next--;
60813+
60814+ return 1;
60815+}
60816+
60817+static __inline__ int
60818+alloc_push(void *buf)
60819+{
60820+ if (alloc_stack_next >= alloc_stack_size)
60821+ return 1;
60822+
60823+ alloc_stack[alloc_stack_next - 1] = buf;
60824+
60825+ alloc_stack_next++;
60826+
60827+ return 0;
60828+}
60829+
60830+void *
60831+acl_alloc(unsigned long len)
60832+{
60833+ void *ret = NULL;
60834+
60835+ if (!len || len > PAGE_SIZE)
60836+ goto out;
60837+
60838+ ret = kmalloc(len, GFP_KERNEL);
60839+
60840+ if (ret) {
60841+ if (alloc_push(ret)) {
60842+ kfree(ret);
60843+ ret = NULL;
60844+ }
60845+ }
60846+
60847+out:
60848+ return ret;
60849+}
60850+
60851+void *
60852+acl_alloc_num(unsigned long num, unsigned long len)
60853+{
60854+ if (!len || (num > (PAGE_SIZE / len)))
60855+ return NULL;
60856+
60857+ return acl_alloc(num * len);
60858+}
60859+
60860+void
60861+acl_free_all(void)
60862+{
60863+ if (gr_acl_is_enabled() || !alloc_stack)
60864+ return;
60865+
60866+ while (alloc_pop()) ;
60867+
60868+ if (alloc_stack) {
60869+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
60870+ kfree(alloc_stack);
60871+ else
60872+ vfree(alloc_stack);
60873+ }
60874+
60875+ alloc_stack = NULL;
60876+ alloc_stack_size = 1;
60877+ alloc_stack_next = 1;
60878+
60879+ return;
60880+}
60881+
60882+int
60883+acl_alloc_stack_init(unsigned long size)
60884+{
60885+ if ((size * sizeof (void *)) <= PAGE_SIZE)
60886+ alloc_stack =
60887+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
60888+ else
60889+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
60890+
60891+ alloc_stack_size = size;
60892+
60893+ if (!alloc_stack)
60894+ return 0;
60895+ else
60896+ return 1;
60897+}
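/*
 * Usage sketch, not part of the patch: gracl_alloc.c is a one-shot
 * region allocator; every acl_alloc() is pushed on a pre-sized pointer
 * stack so acl_free_all() can tear the whole policy down in one pass.
 * Below is a userspace model of the same scheme (malloc stands in for
 * kmalloc, and the PAGE_SIZE cap on single allocations is omitted).
 */
#include <stdio.h>
#include <stdlib.h>

static void **alloc_stack;
static unsigned long alloc_stack_next = 1, alloc_stack_size = 1;

static int stack_init(unsigned long size)
{
	alloc_stack = malloc(size * sizeof(void *));
	alloc_stack_size = size;
	return alloc_stack != NULL;
}

static void *tracked_alloc(unsigned long len)
{
	if (alloc_stack_next >= alloc_stack_size)
		return NULL;	/* stack full: the kernel version also refuses */
	void *p = malloc(len);
	if (p)
		alloc_stack[alloc_stack_next++ - 1] = p;
	return p;
}

static void free_all(void)
{
	while (alloc_stack_next > 1)
		free(alloc_stack[--alloc_stack_next - 1]);
	free(alloc_stack);
	alloc_stack = NULL;
	alloc_stack_size = alloc_stack_next = 1;
}

int main(void)
{
	if (!stack_init(8))
		return 1;
	char *a = tracked_alloc(64), *b = tracked_alloc(128);

	printf("%p %p\n", (void *)a, (void *)b);
	free_all();	/* single-pass teardown, as acl_free_all() does */
	return 0;
}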
60898diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
60899new file mode 100644
60900index 0000000..bdd51ea
60901--- /dev/null
60902+++ b/grsecurity/gracl_cap.c
60903@@ -0,0 +1,110 @@
60904+#include <linux/kernel.h>
60905+#include <linux/module.h>
60906+#include <linux/sched.h>
60907+#include <linux/gracl.h>
60908+#include <linux/grsecurity.h>
60909+#include <linux/grinternal.h>
60910+
60911+extern const char *captab_log[];
60912+extern int captab_log_entries;
60913+
60914+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
60915+{
60916+ struct acl_subject_label *curracl;
60917+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60918+ kernel_cap_t cap_audit = __cap_empty_set;
60919+
60920+ if (!gr_acl_is_enabled())
60921+ return 1;
60922+
60923+ curracl = task->acl;
60924+
60925+ cap_drop = curracl->cap_lower;
60926+ cap_mask = curracl->cap_mask;
60927+ cap_audit = curracl->cap_invert_audit;
60928+
60929+ while ((curracl = curracl->parent_subject)) {
60930+	/* if the cap isn't specified in the current computed mask but is specified in
60931+	   this level's subject, record it in the computed mask and, if it is
60932+	   lowered at this level, add it to the set of dropped capabilities;
60933+	   capabilities already decided at a nearer level are left untouched
60934+	*/
60935+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60936+ cap_raise(cap_mask, cap);
60937+ if (cap_raised(curracl->cap_lower, cap))
60938+ cap_raise(cap_drop, cap);
60939+ if (cap_raised(curracl->cap_invert_audit, cap))
60940+ cap_raise(cap_audit, cap);
60941+ }
60942+ }
60943+
60944+ if (!cap_raised(cap_drop, cap)) {
60945+ if (cap_raised(cap_audit, cap))
60946+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
60947+ return 1;
60948+ }
60949+
60950+ curracl = task->acl;
60951+
60952+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
60953+ && cap_raised(cred->cap_effective, cap)) {
60954+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60955+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
60956+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
60957+ gr_to_filename(task->exec_file->f_path.dentry,
60958+ task->exec_file->f_path.mnt) : curracl->filename,
60959+ curracl->filename, 0UL,
60960+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
60961+ return 1;
60962+ }
60963+
60964+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
60965+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
60966+
60967+ return 0;
60968+}
60969+
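/*
 * Illustrative sketch, not part of the patch: the walk above folds the
 * subject chain from most to least specific; once a level has spoken
 * for a capability, outer levels cannot override it.  Toy bitmask
 * model below.
 */
#include <stdio.h>
#include <stddef.h>

struct subj { unsigned int cap_mask, cap_lower; struct subj *parent; };

static int cap_dropped(struct subj *s, unsigned int cap_bit)
{
	unsigned int mask = s->cap_mask, drop = s->cap_lower;

	while ((s = s->parent)) {
		if (!(mask & cap_bit) && (s->cap_mask & cap_bit)) {
			mask |= cap_bit;
			if (s->cap_lower & cap_bit)
				drop |= cap_bit;
		}
	}
	return !!(drop & cap_bit);
}

int main(void)
{
	/* parent lowers the cap, child says nothing: parent's decision applies */
	struct subj parent = { 0x1, 0x1, NULL };
	struct subj child  = { 0x0, 0x0, &parent };

	printf("%d\n", cap_dropped(&child, 0x1));	/* 1: dropped */
	return 0;
}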
60970+int
60971+gr_acl_is_capable(const int cap)
60972+{
60973+ return gr_task_acl_is_capable(current, current_cred(), cap);
60974+}
60975+
60976+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
60977+{
60978+ struct acl_subject_label *curracl;
60979+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60980+
60981+ if (!gr_acl_is_enabled())
60982+ return 1;
60983+
60984+ curracl = task->acl;
60985+
60986+ cap_drop = curracl->cap_lower;
60987+ cap_mask = curracl->cap_mask;
60988+
60989+ while ((curracl = curracl->parent_subject)) {
60990+	/* if the cap isn't specified in the current computed mask but is specified in
60991+	   this level's subject, record it in the computed mask and, if it is
60992+	   lowered at this level, add it to the set of dropped capabilities;
60993+	   capabilities already decided at a nearer level are left untouched
60994+	*/
60995+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60996+ cap_raise(cap_mask, cap);
60997+ if (cap_raised(curracl->cap_lower, cap))
60998+ cap_raise(cap_drop, cap);
60999+ }
61000+ }
61001+
61002+ if (!cap_raised(cap_drop, cap))
61003+ return 1;
61004+
61005+ return 0;
61006+}
61007+
61008+int
61009+gr_acl_is_capable_nolog(const int cap)
61010+{
61011+ return gr_task_acl_is_capable_nolog(current, cap);
61012+}
61013+
61014diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
61015new file mode 100644
61016index 0000000..a340c17
61017--- /dev/null
61018+++ b/grsecurity/gracl_fs.c
61019@@ -0,0 +1,431 @@
61020+#include <linux/kernel.h>
61021+#include <linux/sched.h>
61022+#include <linux/types.h>
61023+#include <linux/fs.h>
61024+#include <linux/file.h>
61025+#include <linux/stat.h>
61026+#include <linux/grsecurity.h>
61027+#include <linux/grinternal.h>
61028+#include <linux/gracl.h>
61029+
61030+umode_t
61031+gr_acl_umask(void)
61032+{
61033+ if (unlikely(!gr_acl_is_enabled()))
61034+ return 0;
61035+
61036+ return current->role->umask;
61037+}
61038+
61039+__u32
61040+gr_acl_handle_hidden_file(const struct dentry * dentry,
61041+ const struct vfsmount * mnt)
61042+{
61043+ __u32 mode;
61044+
61045+ if (unlikely(!dentry->d_inode))
61046+ return GR_FIND;
61047+
61048+ mode =
61049+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
61050+
61051+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
61052+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
61053+ return mode;
61054+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
61055+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
61056+ return 0;
61057+ } else if (unlikely(!(mode & GR_FIND)))
61058+ return 0;
61059+
61060+ return GR_FIND;
61061+}
61062+
61063+__u32
61064+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
61065+ int acc_mode)
61066+{
61067+ __u32 reqmode = GR_FIND;
61068+ __u32 mode;
61069+
61070+ if (unlikely(!dentry->d_inode))
61071+ return reqmode;
61072+
61073+ if (acc_mode & MAY_APPEND)
61074+ reqmode |= GR_APPEND;
61075+ else if (acc_mode & MAY_WRITE)
61076+ reqmode |= GR_WRITE;
61077+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
61078+ reqmode |= GR_READ;
61079+
61080+ mode =
61081+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
61082+ mnt);
61083+
61084+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61085+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
61086+ reqmode & GR_READ ? " reading" : "",
61087+ reqmode & GR_WRITE ? " writing" : reqmode &
61088+ GR_APPEND ? " appending" : "");
61089+ return reqmode;
61090+ } else
61091+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61092+ {
61093+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
61094+ reqmode & GR_READ ? " reading" : "",
61095+ reqmode & GR_WRITE ? " writing" : reqmode &
61096+ GR_APPEND ? " appending" : "");
61097+ return 0;
61098+ } else if (unlikely((mode & reqmode) != reqmode))
61099+ return 0;
61100+
61101+ return reqmode;
61102+}
61103+
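/*
 * Illustrative sketch, not part of the patch: how acc_mode maps onto
 * the requested GR_ bits above.  MAY_APPEND takes precedence over
 * MAY_WRITE, and a read of a directory is not counted as GR_READ.
 * The GR_ and MAY_ constants below are placeholders.
 */
#include <stdio.h>

#define GR_FIND    0x01
#define GR_READ    0x02
#define GR_WRITE   0x04
#define GR_APPEND  0x08
#define MAY_WRITE  0x2
#define MAY_READ   0x4
#define MAY_APPEND 0x8

static unsigned int open_reqmode(int acc_mode, int is_dir)
{
	unsigned int reqmode = GR_FIND;

	if (acc_mode & MAY_APPEND)
		reqmode |= GR_APPEND;	/* append-only: don't also demand write */
	else if (acc_mode & MAY_WRITE)
		reqmode |= GR_WRITE;
	if ((acc_mode & MAY_READ) && !is_dir)
		reqmode |= GR_READ;
	return reqmode;
}

int main(void)
{
	printf("%#x\n", open_reqmode(MAY_READ | MAY_APPEND, 0));	/* 0xb */
	return 0;
}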
61104+__u32
61105+gr_acl_handle_creat(const struct dentry * dentry,
61106+ const struct dentry * p_dentry,
61107+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
61108+ const int imode)
61109+{
61110+ __u32 reqmode = GR_WRITE | GR_CREATE;
61111+ __u32 mode;
61112+
61113+ if (acc_mode & MAY_APPEND)
61114+ reqmode |= GR_APPEND;
61115+ // if a directory was required or the directory already exists, then
61116+ // don't count this open as a read
61117+ if ((acc_mode & MAY_READ) &&
61118+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
61119+ reqmode |= GR_READ;
61120+ if ((open_flags & O_CREAT) &&
61121+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
61122+ reqmode |= GR_SETID;
61123+
61124+ mode =
61125+ gr_check_create(dentry, p_dentry, p_mnt,
61126+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
61127+
61128+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61129+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
61130+ reqmode & GR_READ ? " reading" : "",
61131+ reqmode & GR_WRITE ? " writing" : reqmode &
61132+ GR_APPEND ? " appending" : "");
61133+ return reqmode;
61134+ } else
61135+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61136+ {
61137+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
61138+ reqmode & GR_READ ? " reading" : "",
61139+ reqmode & GR_WRITE ? " writing" : reqmode &
61140+ GR_APPEND ? " appending" : "");
61141+ return 0;
61142+ } else if (unlikely((mode & reqmode) != reqmode))
61143+ return 0;
61144+
61145+ return reqmode;
61146+}
61147+
61148+__u32
61149+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
61150+ const int fmode)
61151+{
61152+ __u32 mode, reqmode = GR_FIND;
61153+
61154+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
61155+ reqmode |= GR_EXEC;
61156+ if (fmode & S_IWOTH)
61157+ reqmode |= GR_WRITE;
61158+ if (fmode & S_IROTH)
61159+ reqmode |= GR_READ;
61160+
61161+ mode =
61162+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
61163+ mnt);
61164+
61165+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
61166+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
61167+ reqmode & GR_READ ? " reading" : "",
61168+ reqmode & GR_WRITE ? " writing" : "",
61169+ reqmode & GR_EXEC ? " executing" : "");
61170+ return reqmode;
61171+ } else
61172+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
61173+ {
61174+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
61175+ reqmode & GR_READ ? " reading" : "",
61176+ reqmode & GR_WRITE ? " writing" : "",
61177+ reqmode & GR_EXEC ? " executing" : "");
61178+ return 0;
61179+ } else if (unlikely((mode & reqmode) != reqmode))
61180+ return 0;
61181+
61182+ return reqmode;
61183+}
61184+
61185+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
61186+{
61187+ __u32 mode;
61188+
61189+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
61190+
61191+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
61192+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
61193+ return mode;
61194+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
61195+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
61196+ return 0;
61197+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
61198+ return 0;
61199+
61200+ return (reqmode);
61201+}
61202+
61203+__u32
61204+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
61205+{
61206+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
61207+}
61208+
61209+__u32
61210+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
61211+{
61212+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
61213+}
61214+
61215+__u32
61216+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
61217+{
61218+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
61219+}
61220+
61221+__u32
61222+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
61223+{
61224+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
61225+}
61226+
61227+__u32
61228+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
61229+ umode_t *modeptr)
61230+{
61231+ umode_t mode;
61232+
61233+ *modeptr &= ~gr_acl_umask();
61234+ mode = *modeptr;
61235+
61236+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
61237+ return 1;
61238+
61239+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
61240+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
61241+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
61242+ GR_CHMOD_ACL_MSG);
61243+ } else {
61244+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
61245+ }
61246+}
61247+
61248+__u32
61249+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
61250+{
61251+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
61252+}
61253+
61254+__u32
61255+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
61256+{
61257+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
61258+}
61259+
61260+__u32
61261+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
61262+{
61263+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
61264+}
61265+
61266+__u32
61267+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
61268+{
61269+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
61270+ GR_UNIXCONNECT_ACL_MSG);
61271+}
61272+
61273+/* hardlinks require at minimum create and link permission;
61274+ any additional privilege required is based on the
61275+ privilege of the file being linked to
61276+*/
61277+__u32
61278+gr_acl_handle_link(const struct dentry * new_dentry,
61279+ const struct dentry * parent_dentry,
61280+ const struct vfsmount * parent_mnt,
61281+ const struct dentry * old_dentry,
61282+ const struct vfsmount * old_mnt, const struct filename *to)
61283+{
61284+ __u32 mode;
61285+ __u32 needmode = GR_CREATE | GR_LINK;
61286+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
61287+
61288+ mode =
61289+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
61290+ old_mnt);
61291+
61292+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
61293+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
61294+ return mode;
61295+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61296+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
61297+ return 0;
61298+ } else if (unlikely((mode & needmode) != needmode))
61299+ return 0;
61300+
61301+ return 1;
61302+}
61303+
61304+__u32
61305+gr_acl_handle_symlink(const struct dentry * new_dentry,
61306+ const struct dentry * parent_dentry,
61307+ const struct vfsmount * parent_mnt, const struct filename *from)
61308+{
61309+ __u32 needmode = GR_WRITE | GR_CREATE;
61310+ __u32 mode;
61311+
61312+ mode =
61313+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
61314+ GR_CREATE | GR_AUDIT_CREATE |
61315+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
61316+
61317+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
61318+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
61319+ return mode;
61320+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
61321+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
61322+ return 0;
61323+ } else if (unlikely((mode & needmode) != needmode))
61324+ return 0;
61325+
61326+ return (GR_WRITE | GR_CREATE);
61327+}
61328+
61329+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
61330+{
61331+ __u32 mode;
61332+
61333+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
61334+
61335+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
61336+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
61337+ return mode;
61338+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
61339+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
61340+ return 0;
61341+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
61342+ return 0;
61343+
61344+ return (reqmode);
61345+}
61346+
61347+__u32
61348+gr_acl_handle_mknod(const struct dentry * new_dentry,
61349+ const struct dentry * parent_dentry,
61350+ const struct vfsmount * parent_mnt,
61351+ const int mode)
61352+{
61353+ __u32 reqmode = GR_WRITE | GR_CREATE;
61354+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
61355+ reqmode |= GR_SETID;
61356+
61357+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61358+ reqmode, GR_MKNOD_ACL_MSG);
61359+}
61360+
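/*
 * Illustrative sketch, not part of the patch: the same setid test
 * appears in gr_acl_handle_creat(), gr_acl_handle_chmod() and the
 * mknod handler above.  SGID alone (without group-execute) denotes
 * mandatory locking, not a setid file, so it is excluded.
 */
#include <stdio.h>
#include <sys/stat.h>

static int mode_is_setid(mode_t mode)
{
	return (mode & S_ISUID) ||
	       ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}

int main(void)
{
	printf("%d\n", mode_is_setid(S_ISUID | 0755));	/* 1 */
	printf("%d\n", mode_is_setid(S_ISGID | 0644));	/* 0: mandatory locking */
	printf("%d\n", mode_is_setid(S_ISGID | 0755));	/* 1: group has execute */
	return 0;
}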
61361+__u32
61362+gr_acl_handle_mkdir(const struct dentry *new_dentry,
61363+ const struct dentry *parent_dentry,
61364+ const struct vfsmount *parent_mnt)
61365+{
61366+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
61367+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
61368+}
61369+
61370+#define RENAME_CHECK_SUCCESS(old, new) \
61371+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
61372+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
61373+
61374+int
61375+gr_acl_handle_rename(struct dentry *new_dentry,
61376+ struct dentry *parent_dentry,
61377+ const struct vfsmount *parent_mnt,
61378+ struct dentry *old_dentry,
61379+ struct inode *old_parent_inode,
61380+ struct vfsmount *old_mnt, const struct filename *newname)
61381+{
61382+ __u32 comp1, comp2;
61383+ int error = 0;
61384+
61385+ if (unlikely(!gr_acl_is_enabled()))
61386+ return 0;
61387+
61388+ if (!new_dentry->d_inode) {
61389+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
61390+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
61391+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
61392+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
61393+ GR_DELETE | GR_AUDIT_DELETE |
61394+ GR_AUDIT_READ | GR_AUDIT_WRITE |
61395+ GR_SUPPRESS, old_mnt);
61396+ } else {
61397+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
61398+ GR_CREATE | GR_DELETE |
61399+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
61400+ GR_AUDIT_READ | GR_AUDIT_WRITE |
61401+ GR_SUPPRESS, parent_mnt);
61402+ comp2 =
61403+ gr_search_file(old_dentry,
61404+ GR_READ | GR_WRITE | GR_AUDIT_READ |
61405+ GR_DELETE | GR_AUDIT_DELETE |
61406+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
61407+ }
61408+
61409+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
61410+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
61411+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
61412+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
61413+ && !(comp2 & GR_SUPPRESS)) {
61414+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
61415+ error = -EACCES;
61416+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
61417+ error = -EACCES;
61418+
61419+ return error;
61420+}
61421+
61422+void
61423+gr_acl_handle_exit(void)
61424+{
61425+ u16 id;
61426+ char *rolename;
61427+
61428+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
61429+ !(current->role->roletype & GR_ROLE_PERSIST))) {
61430+ id = current->acl_role_id;
61431+ rolename = current->role->rolename;
61432+ gr_set_acls(1);
61433+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
61434+ }
61435+
61436+ gr_put_exec_file(current);
61437+ return;
61438+}
61439+
61440+int
61441+gr_acl_handle_procpidmem(const struct task_struct *task)
61442+{
61443+ if (unlikely(!gr_acl_is_enabled()))
61444+ return 0;
61445+
61446+ if (task != current && task->acl->mode & GR_PROTPROCFD)
61447+ return -EACCES;
61448+
61449+ return 0;
61450+}
61451diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
61452new file mode 100644
61453index 0000000..4699807
61454--- /dev/null
61455+++ b/grsecurity/gracl_ip.c
61456@@ -0,0 +1,384 @@
61457+#include <linux/kernel.h>
61458+#include <asm/uaccess.h>
61459+#include <asm/errno.h>
61460+#include <net/sock.h>
61461+#include <linux/file.h>
61462+#include <linux/fs.h>
61463+#include <linux/net.h>
61464+#include <linux/in.h>
61465+#include <linux/skbuff.h>
61466+#include <linux/ip.h>
61467+#include <linux/udp.h>
61468+#include <linux/types.h>
61469+#include <linux/sched.h>
61470+#include <linux/netdevice.h>
61471+#include <linux/inetdevice.h>
61472+#include <linux/gracl.h>
61473+#include <linux/grsecurity.h>
61474+#include <linux/grinternal.h>
61475+
61476+#define GR_BIND 0x01
61477+#define GR_CONNECT 0x02
61478+#define GR_INVERT 0x04
61479+#define GR_BINDOVERRIDE 0x08
61480+#define GR_CONNECTOVERRIDE 0x10
61481+#define GR_SOCK_FAMILY 0x20
61482+
61483+static const char * gr_protocols[IPPROTO_MAX] = {
61484+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
61485+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
61486+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
61487+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
61488+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
61489+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
61490+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
61491+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
61492+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
61493+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
61494+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
61495+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
61496+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
61497+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
61498+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
61499+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
61500+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
61501+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
61502+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
61503+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
61504+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
61505+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
61506+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
61507+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
61508+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
61509+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
61510+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
61511+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
61512+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
61513+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
61514+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
61515+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
61516+ };
61517+
61518+static const char * gr_socktypes[SOCK_MAX] = {
61519+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
61520+ "unknown:7", "unknown:8", "unknown:9", "packet"
61521+ };
61522+
61523+static const char * gr_sockfamilies[AF_MAX+1] = {
61524+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
61525+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
61526+	"econet", "atmsvc", "rds", "sna", "irda", "pppox", "wanpipe", "llc", "fam_27", "fam_28",
61527+	"tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "caif"
61528+ };
61529+
61530+const char *
61531+gr_proto_to_name(unsigned char proto)
61532+{
61533+ return gr_protocols[proto];
61534+}
61535+
61536+const char *
61537+gr_socktype_to_name(unsigned char type)
61538+{
61539+ return gr_socktypes[type];
61540+}
61541+
61542+const char *
61543+gr_sockfamily_to_name(unsigned char family)
61544+{
61545+ return gr_sockfamilies[family];
61546+}
61547+
61548+int
61549+gr_search_socket(const int domain, const int type, const int protocol)
61550+{
61551+ struct acl_subject_label *curr;
61552+ const struct cred *cred = current_cred();
61553+
61554+ if (unlikely(!gr_acl_is_enabled()))
61555+ goto exit;
61556+
61557+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
61558+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
61559+ goto exit; // let the kernel handle it
61560+
61561+ curr = current->acl;
61562+
61563+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
61564+ /* the family is allowed, if this is PF_INET allow it only if
61565+ the extra sock type/protocol checks pass */
61566+ if (domain == PF_INET)
61567+ goto inet_check;
61568+ goto exit;
61569+ } else {
61570+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61571+ __u32 fakeip = 0;
61572+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61573+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
61574+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
61575+ gr_to_filename(current->exec_file->f_path.dentry,
61576+ current->exec_file->f_path.mnt) :
61577+ curr->filename, curr->filename,
61578+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
61579+ &current->signal->saved_ip);
61580+ goto exit;
61581+ }
61582+ goto exit_fail;
61583+ }
61584+
61585+inet_check:
61586+ /* the rest of this checking is for IPv4 only */
61587+ if (!curr->ips)
61588+ goto exit;
61589+
61590+ if ((curr->ip_type & (1 << type)) &&
61591+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
61592+ goto exit;
61593+
61594+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61595+		/* we don't place acls on raw sockets, and sometimes
61596+ dgram/ip sockets are opened for ioctl and not
61597+ bind/connect, so we'll fake a bind learn log */
61598+ if (type == SOCK_RAW || type == SOCK_PACKET) {
61599+ __u32 fakeip = 0;
61600+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61601+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
61602+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
61603+ gr_to_filename(current->exec_file->f_path.dentry,
61604+ current->exec_file->f_path.mnt) :
61605+ curr->filename, curr->filename,
61606+ &fakeip, 0, type,
61607+ protocol, GR_CONNECT, &current->signal->saved_ip);
61608+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
61609+ __u32 fakeip = 0;
61610+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61611+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
61612+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
61613+ gr_to_filename(current->exec_file->f_path.dentry,
61614+ current->exec_file->f_path.mnt) :
61615+ curr->filename, curr->filename,
61616+ &fakeip, 0, type,
61617+ protocol, GR_BIND, &current->signal->saved_ip);
61618+ }
61619+ /* we'll log when they use connect or bind */
61620+ goto exit;
61621+ }
61622+
61623+exit_fail:
61624+ if (domain == PF_INET)
61625+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
61626+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
61627+ else
61628+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
61629+ gr_socktype_to_name(type), protocol);
61630+
61631+ return 0;
61632+exit:
61633+ return 1;
61634+}
61635+
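/*
 * Illustrative sketch, not part of the patch: sock_families[] and
 * ip_proto[] above are fixed-size bitmaps indexed with the usual
 * word/bit split, shown here in isolation.
 */
#include <stdio.h>

#define NBITS 256

static int bit_test(const unsigned int *map, int n)
{
	return (map[n / 32] >> (n % 32)) & 1;
}

static void bit_set(unsigned int *map, int n)
{
	map[n / 32] |= 1U << (n % 32);
}

int main(void)
{
	unsigned int map[NBITS / 32] = { 0 };

	bit_set(map, 6);	/* e.g. IPPROTO_TCP */
	printf("%d %d\n", bit_test(map, 6), bit_test(map, 17));	/* 1 0 */
	return 0;
}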
61636+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
61637+{
61638+ if ((ip->mode & mode) &&
61639+ (ip_port >= ip->low) &&
61640+ (ip_port <= ip->high) &&
61641+ ((ntohl(ip_addr) & our_netmask) ==
61642+ (ntohl(our_addr) & our_netmask))
61643+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
61644+ && (ip->type & (1 << type))) {
61645+ if (ip->mode & GR_INVERT)
61646+ return 2; // specifically denied
61647+ else
61648+ return 1; // allowed
61649+ }
61650+
61651+ return 0; // not specifically allowed, may continue parsing
61652+}
61653+
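/*
 * Worked example, not part of the patch: check_ip_policy() matches when
 * the port falls within [low, high], the address lies in the rule's
 * subnet, and the protocol/type bits are set; GR_INVERT turns a match
 * into an explicit denial.  The subnet test alone:
 */
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	/* does 192.168.1.77 fall inside 192.168.1.0/24? */
	unsigned int ip_addr = inet_addr("192.168.1.77");	/* network order */
	unsigned int our_addr = inet_addr("192.168.1.0");
	unsigned int our_netmask = 0xffffff00;			/* host order, /24 */

	printf("%d\n", (ntohl(ip_addr) & our_netmask) ==
	               (ntohl(our_addr) & our_netmask));	/* 1 */
	return 0;
}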
61654+static int
61655+gr_search_connectbind(const int full_mode, struct sock *sk,
61656+ struct sockaddr_in *addr, const int type)
61657+{
61658+ char iface[IFNAMSIZ] = {0};
61659+ struct acl_subject_label *curr;
61660+ struct acl_ip_label *ip;
61661+ struct inet_sock *isk;
61662+ struct net_device *dev;
61663+ struct in_device *idev;
61664+ unsigned long i;
61665+ int ret;
61666+ int mode = full_mode & (GR_BIND | GR_CONNECT);
61667+ __u32 ip_addr = 0;
61668+ __u32 our_addr;
61669+ __u32 our_netmask;
61670+ char *p;
61671+ __u16 ip_port = 0;
61672+ const struct cred *cred = current_cred();
61673+
61674+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
61675+ return 0;
61676+
61677+ curr = current->acl;
61678+ isk = inet_sk(sk);
61679+
61680+	/* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
61681+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
61682+ addr->sin_addr.s_addr = curr->inaddr_any_override;
61683+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
61684+ struct sockaddr_in saddr;
61685+ int err;
61686+
61687+ saddr.sin_family = AF_INET;
61688+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
61689+ saddr.sin_port = isk->inet_sport;
61690+
61691+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61692+ if (err)
61693+ return err;
61694+
61695+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61696+ if (err)
61697+ return err;
61698+ }
61699+
61700+ if (!curr->ips)
61701+ return 0;
61702+
61703+ ip_addr = addr->sin_addr.s_addr;
61704+ ip_port = ntohs(addr->sin_port);
61705+
61706+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61707+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61708+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
61709+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
61710+ gr_to_filename(current->exec_file->f_path.dentry,
61711+ current->exec_file->f_path.mnt) :
61712+ curr->filename, curr->filename,
61713+ &ip_addr, ip_port, type,
61714+ sk->sk_protocol, mode, &current->signal->saved_ip);
61715+ return 0;
61716+ }
61717+
61718+ for (i = 0; i < curr->ip_num; i++) {
61719+ ip = *(curr->ips + i);
61720+ if (ip->iface != NULL) {
61721+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
61722+ p = strchr(iface, ':');
61723+ if (p != NULL)
61724+ *p = '\0';
61725+ dev = dev_get_by_name(sock_net(sk), iface);
61726+ if (dev == NULL)
61727+ continue;
61728+ idev = in_dev_get(dev);
61729+ if (idev == NULL) {
61730+ dev_put(dev);
61731+ continue;
61732+ }
61733+ rcu_read_lock();
61734+ for_ifa(idev) {
61735+ if (!strcmp(ip->iface, ifa->ifa_label)) {
61736+ our_addr = ifa->ifa_address;
61737+ our_netmask = 0xffffffff;
61738+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61739+ if (ret == 1) {
61740+ rcu_read_unlock();
61741+ in_dev_put(idev);
61742+ dev_put(dev);
61743+ return 0;
61744+ } else if (ret == 2) {
61745+ rcu_read_unlock();
61746+ in_dev_put(idev);
61747+ dev_put(dev);
61748+ goto denied;
61749+ }
61750+ }
61751+ } endfor_ifa(idev);
61752+ rcu_read_unlock();
61753+ in_dev_put(idev);
61754+ dev_put(dev);
61755+ } else {
61756+ our_addr = ip->addr;
61757+ our_netmask = ip->netmask;
61758+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61759+ if (ret == 1)
61760+ return 0;
61761+ else if (ret == 2)
61762+ goto denied;
61763+ }
61764+ }
61765+
61766+denied:
61767+ if (mode == GR_BIND)
61768+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61769+ else if (mode == GR_CONNECT)
61770+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61771+
61772+ return -EACCES;
61773+}
61774+
61775+int
61776+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
61777+{
61778+ /* always allow disconnection of dgram sockets with connect */
61779+ if (addr->sin_family == AF_UNSPEC)
61780+ return 0;
61781+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
61782+}
61783+
61784+int
61785+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
61786+{
61787+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
61788+}
61789+
61790+int gr_search_listen(struct socket *sock)
61791+{
61792+ struct sock *sk = sock->sk;
61793+ struct sockaddr_in addr;
61794+
61795+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
61796+ addr.sin_port = inet_sk(sk)->inet_sport;
61797+
61798+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61799+}
61800+
61801+int gr_search_accept(struct socket *sock)
61802+{
61803+ struct sock *sk = sock->sk;
61804+ struct sockaddr_in addr;
61805+
61806+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
61807+ addr.sin_port = inet_sk(sk)->inet_sport;
61808+
61809+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61810+}
61811+
61812+int
61813+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
61814+{
61815+ if (addr)
61816+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
61817+ else {
61818+ struct sockaddr_in sin;
61819+ const struct inet_sock *inet = inet_sk(sk);
61820+
61821+ sin.sin_addr.s_addr = inet->inet_daddr;
61822+ sin.sin_port = inet->inet_dport;
61823+
61824+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61825+ }
61826+}
61827+
61828+int
61829+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
61830+{
61831+ struct sockaddr_in sin;
61832+
61833+ if (unlikely(skb->len < sizeof (struct udphdr)))
61834+ return 0; // skip this packet
61835+
61836+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
61837+ sin.sin_port = udp_hdr(skb)->source;
61838+
61839+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61840+}
61841diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
61842new file mode 100644
61843index 0000000..25f54ef
61844--- /dev/null
61845+++ b/grsecurity/gracl_learn.c
61846@@ -0,0 +1,207 @@
61847+#include <linux/kernel.h>
61848+#include <linux/mm.h>
61849+#include <linux/sched.h>
61850+#include <linux/poll.h>
61851+#include <linux/string.h>
61852+#include <linux/file.h>
61853+#include <linux/types.h>
61854+#include <linux/vmalloc.h>
61855+#include <linux/grinternal.h>
61856+
61857+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
61858+ size_t count, loff_t *ppos);
61859+extern int gr_acl_is_enabled(void);
61860+
61861+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
61862+static int gr_learn_attached;
61863+
61864+/* use a 512k buffer */
61865+#define LEARN_BUFFER_SIZE (512 * 1024)
61866+
61867+static DEFINE_SPINLOCK(gr_learn_lock);
61868+static DEFINE_MUTEX(gr_learn_user_mutex);
61869+
61870+/* we need to maintain two buffers, so that the kernel context of grlearn
61871+   uses a mutex around the userspace copying, and the other kernel contexts
61872+ use a spinlock when copying into the buffer, since they cannot sleep
61873+*/
61874+static char *learn_buffer;
61875+static char *learn_buffer_user;
61876+static int learn_buffer_len;
61877+static int learn_buffer_user_len;
61878+
61879+static ssize_t
61880+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
61881+{
61882+ DECLARE_WAITQUEUE(wait, current);
61883+ ssize_t retval = 0;
61884+
61885+ add_wait_queue(&learn_wait, &wait);
61886+ set_current_state(TASK_INTERRUPTIBLE);
61887+ do {
61888+ mutex_lock(&gr_learn_user_mutex);
61889+ spin_lock(&gr_learn_lock);
61890+ if (learn_buffer_len)
61891+ break;
61892+ spin_unlock(&gr_learn_lock);
61893+ mutex_unlock(&gr_learn_user_mutex);
61894+ if (file->f_flags & O_NONBLOCK) {
61895+ retval = -EAGAIN;
61896+ goto out;
61897+ }
61898+ if (signal_pending(current)) {
61899+ retval = -ERESTARTSYS;
61900+ goto out;
61901+ }
61902+
61903+ schedule();
61904+ } while (1);
61905+
61906+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
61907+ learn_buffer_user_len = learn_buffer_len;
61908+ retval = learn_buffer_len;
61909+ learn_buffer_len = 0;
61910+
61911+ spin_unlock(&gr_learn_lock);
61912+
61913+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
61914+ retval = -EFAULT;
61915+
61916+ mutex_unlock(&gr_learn_user_mutex);
61917+out:
61918+ set_current_state(TASK_RUNNING);
61919+ remove_wait_queue(&learn_wait, &wait);
61920+ return retval;
61921+}
61922+
61923+static unsigned int
61924+poll_learn(struct file * file, poll_table * wait)
61925+{
61926+ poll_wait(file, &learn_wait, wait);
61927+
61928+ if (learn_buffer_len)
61929+ return (POLLIN | POLLRDNORM);
61930+
61931+ return 0;
61932+}
61933+
61934+void
61935+gr_clear_learn_entries(void)
61936+{
61937+ char *tmp;
61938+
61939+ mutex_lock(&gr_learn_user_mutex);
61940+ spin_lock(&gr_learn_lock);
61941+ tmp = learn_buffer;
61942+ learn_buffer = NULL;
61943+ spin_unlock(&gr_learn_lock);
61944+ if (tmp)
61945+ vfree(tmp);
61946+ if (learn_buffer_user != NULL) {
61947+ vfree(learn_buffer_user);
61948+ learn_buffer_user = NULL;
61949+ }
61950+ learn_buffer_len = 0;
61951+ mutex_unlock(&gr_learn_user_mutex);
61952+
61953+ return;
61954+}
61955+
61956+void
61957+gr_add_learn_entry(const char *fmt, ...)
61958+{
61959+ va_list args;
61960+ unsigned int len;
61961+
61962+ if (!gr_learn_attached)
61963+ return;
61964+
61965+ spin_lock(&gr_learn_lock);
61966+
61967+ /* leave a gap at the end so we know when it's "full" but don't have to
61968+ compute the exact length of the string we're trying to append
61969+ */
61970+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
61971+ spin_unlock(&gr_learn_lock);
61972+ wake_up_interruptible(&learn_wait);
61973+ return;
61974+ }
61975+ if (learn_buffer == NULL) {
61976+ spin_unlock(&gr_learn_lock);
61977+ return;
61978+ }
61979+
61980+ va_start(args, fmt);
61981+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
61982+ va_end(args);
61983+
61984+ learn_buffer_len += len + 1;
61985+
61986+ spin_unlock(&gr_learn_lock);
61987+ wake_up_interruptible(&learn_wait);
61988+
61989+ return;
61990+}
61991+
61992+static int
61993+open_learn(struct inode *inode, struct file *file)
61994+{
61995+ if (file->f_mode & FMODE_READ && gr_learn_attached)
61996+ return -EBUSY;
61997+ if (file->f_mode & FMODE_READ) {
61998+ int retval = 0;
61999+ mutex_lock(&gr_learn_user_mutex);
62000+ if (learn_buffer == NULL)
62001+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
62002+ if (learn_buffer_user == NULL)
62003+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
62004+ if (learn_buffer == NULL) {
62005+ retval = -ENOMEM;
62006+ goto out_error;
62007+ }
62008+ if (learn_buffer_user == NULL) {
62009+ retval = -ENOMEM;
62010+ goto out_error;
62011+ }
62012+ learn_buffer_len = 0;
62013+ learn_buffer_user_len = 0;
62014+ gr_learn_attached = 1;
62015+out_error:
62016+ mutex_unlock(&gr_learn_user_mutex);
62017+ return retval;
62018+ }
62019+ return 0;
62020+}
62021+
62022+static int
62023+close_learn(struct inode *inode, struct file *file)
62024+{
62025+ if (file->f_mode & FMODE_READ) {
62026+ char *tmp = NULL;
62027+ mutex_lock(&gr_learn_user_mutex);
62028+ spin_lock(&gr_learn_lock);
62029+ tmp = learn_buffer;
62030+ learn_buffer = NULL;
62031+ spin_unlock(&gr_learn_lock);
62032+ if (tmp)
62033+ vfree(tmp);
62034+ if (learn_buffer_user != NULL) {
62035+ vfree(learn_buffer_user);
62036+ learn_buffer_user = NULL;
62037+ }
62038+ learn_buffer_len = 0;
62039+ learn_buffer_user_len = 0;
62040+ gr_learn_attached = 0;
62041+ mutex_unlock(&gr_learn_user_mutex);
62042+ }
62043+
62044+ return 0;
62045+}
62046+
62047+const struct file_operations grsec_fops = {
62048+ .read = read_learn,
62049+ .write = write_grsec_handler,
62050+ .open = open_learn,
62051+ .release = close_learn,
62052+ .poll = poll_learn,
62053+};
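/*
 * Usage sketch, not part of the patch: from userspace the learn
 * interface behaves like a character device; poll() until POLLIN, then
 * read() drains what the spinlocked writers accumulated.  The grlearn
 * daemon is the real consumer; the device path below is an assumption.
 */
#include <stdio.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/grsec", O_RDONLY);	/* hypothetical node path */
	char buf[65536];

	if (fd < 0)
		return 1;
	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLIN };

		if (poll(&pfd, 1, -1) <= 0)
			break;
		ssize_t n = read(fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		fwrite(buf, 1, n, stdout);	/* learn entries are '\0'-separated */
	}
	close(fd);
	return 0;
}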
62054diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
62055new file mode 100644
62056index 0000000..39645c9
62057--- /dev/null
62058+++ b/grsecurity/gracl_res.c
62059@@ -0,0 +1,68 @@
62060+#include <linux/kernel.h>
62061+#include <linux/sched.h>
62062+#include <linux/gracl.h>
62063+#include <linux/grinternal.h>
62064+
62065+static const char *restab_log[] = {
62066+ [RLIMIT_CPU] = "RLIMIT_CPU",
62067+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
62068+ [RLIMIT_DATA] = "RLIMIT_DATA",
62069+ [RLIMIT_STACK] = "RLIMIT_STACK",
62070+ [RLIMIT_CORE] = "RLIMIT_CORE",
62071+ [RLIMIT_RSS] = "RLIMIT_RSS",
62072+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
62073+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
62074+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
62075+ [RLIMIT_AS] = "RLIMIT_AS",
62076+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
62077+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
62078+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
62079+ [RLIMIT_NICE] = "RLIMIT_NICE",
62080+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
62081+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
62082+ [GR_CRASH_RES] = "RLIMIT_CRASH"
62083+};
62084+
62085+void
62086+gr_log_resource(const struct task_struct *task,
62087+ const int res, const unsigned long wanted, const int gt)
62088+{
62089+ const struct cred *cred;
62090+ unsigned long rlim;
62091+
62092+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
62093+ return;
62094+
62095+ // not yet supported resource
62096+ if (unlikely(!restab_log[res]))
62097+ return;
62098+
62099+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
62100+ rlim = task_rlimit_max(task, res);
62101+ else
62102+ rlim = task_rlimit(task, res);
62103+
62104+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
62105+ return;
62106+
62107+ rcu_read_lock();
62108+ cred = __task_cred(task);
62109+
62110+ if (res == RLIMIT_NPROC &&
62111+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
62112+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
62113+ goto out_rcu_unlock;
62114+ else if (res == RLIMIT_MEMLOCK &&
62115+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
62116+ goto out_rcu_unlock;
62117+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
62118+ goto out_rcu_unlock;
62119+ rcu_read_unlock();
62120+
62121+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
62122+
62123+ return;
62124+out_rcu_unlock:
62125+ rcu_read_unlock();
62126+ return;
62127+}
62128diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
62129new file mode 100644
62130index 0000000..8c8fc9d
62131--- /dev/null
62132+++ b/grsecurity/gracl_segv.c
62133@@ -0,0 +1,303 @@
62134+#include <linux/kernel.h>
62135+#include <linux/mm.h>
62136+#include <asm/uaccess.h>
62137+#include <asm/errno.h>
62138+#include <asm/mman.h>
62139+#include <net/sock.h>
62140+#include <linux/file.h>
62141+#include <linux/fs.h>
62142+#include <linux/net.h>
62143+#include <linux/in.h>
62144+#include <linux/slab.h>
62145+#include <linux/types.h>
62146+#include <linux/sched.h>
62147+#include <linux/timer.h>
62148+#include <linux/gracl.h>
62149+#include <linux/grsecurity.h>
62150+#include <linux/grinternal.h>
62151+
62152+static struct crash_uid *uid_set;
62153+static unsigned short uid_used;
62154+static DEFINE_SPINLOCK(gr_uid_lock);
62155+extern rwlock_t gr_inode_lock;
62156+extern struct acl_subject_label *
62157+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
62158+ struct acl_role_label *role);
62159+
62160+#ifdef CONFIG_BTRFS_FS
62161+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
62162+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
62163+#endif
62164+
62165+static inline dev_t __get_dev(const struct dentry *dentry)
62166+{
62167+#ifdef CONFIG_BTRFS_FS
62168+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
62169+ return get_btrfs_dev_from_inode(dentry->d_inode);
62170+ else
62171+#endif
62172+ return dentry->d_inode->i_sb->s_dev;
62173+}
62174+
62175+int
62176+gr_init_uidset(void)
62177+{
62178+ uid_set =
62179+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
62180+ uid_used = 0;
62181+
62182+ return uid_set ? 1 : 0;
62183+}
62184+
62185+void
62186+gr_free_uidset(void)
62187+{
62188+ if (uid_set)
62189+ kfree(uid_set);
62190+
62191+ return;
62192+}
62193+
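+/* Binary search of the sorted uid_set table; returns the index or -1. */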
62194+int
62195+gr_find_uid(const uid_t uid)
62196+{
62197+ struct crash_uid *tmp = uid_set;
62198+ uid_t buid;
62199+ int low = 0, high = uid_used - 1, mid;
62200+
62201+ while (high >= low) {
62202+ mid = (low + high) >> 1;
62203+ buid = tmp[mid].uid;
62204+ if (buid == uid)
62205+ return mid;
62206+ if (buid > uid)
62207+ high = mid - 1;
62208+ if (buid < uid)
62209+ low = mid + 1;
62210+ }
62211+
62212+ return -1;
62213+}
62214+
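+/* Insertion sort keeping uid_set ordered by uid for gr_find_uid(). */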
62215+static __inline__ void
62216+gr_insertsort(void)
62217+{
62218+ unsigned short i, j;
62219+ struct crash_uid index;
62220+
62221+ for (i = 1; i < uid_used; i++) {
62222+ index = uid_set[i];
62223+ j = i;
62224+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
62225+ uid_set[j] = uid_set[j - 1];
62226+ j--;
62227+ }
62228+ uid_set[j] = index;
62229+ }
62230+
62231+ return;
62232+}
62233+
62234+static __inline__ void
62235+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
62236+{
62237+ int loc;
62238+ uid_t uid = GR_GLOBAL_UID(kuid);
62239+
62240+ if (uid_used == GR_UIDTABLE_MAX)
62241+ return;
62242+
62243+ loc = gr_find_uid(uid);
62244+
62245+ if (loc >= 0) {
62246+ uid_set[loc].expires = expires;
62247+ return;
62248+ }
62249+
62250+ uid_set[uid_used].uid = uid;
62251+ uid_set[uid_used].expires = expires;
62252+ uid_used++;
62253+
62254+ gr_insertsort();
62255+
62256+ return;
62257+}
62258+
62259+void
62260+gr_remove_uid(const unsigned short loc)
62261+{
62262+ unsigned short i;
62263+
62264+ for (i = loc + 1; i < uid_used; i++)
62265+ uid_set[i - 1] = uid_set[i];
62266+
62267+ uid_used--;
62268+
62269+ return;
62270+}
62271+
62272+int
62273+gr_check_crash_uid(const kuid_t kuid)
62274+{
62275+ int loc;
62276+ int ret = 0;
62277+ uid_t uid;
62278+
62279+ if (unlikely(!gr_acl_is_enabled()))
62280+ return 0;
62281+
62282+ uid = GR_GLOBAL_UID(kuid);
62283+
62284+ spin_lock(&gr_uid_lock);
62285+ loc = gr_find_uid(uid);
62286+
62287+ if (loc < 0)
62288+ goto out_unlock;
62289+
62290+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
62291+ gr_remove_uid(loc);
62292+ else
62293+ ret = 1;
62294+
62295+out_unlock:
62296+ spin_unlock(&gr_uid_lock);
62297+ return ret;
62298+}
62299+
62300+static __inline__ int
62301+proc_is_setxid(const struct cred *cred)
62302+{
62303+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
62304+ !uid_eq(cred->uid, cred->fsuid))
62305+ return 1;
62306+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
62307+ !gid_eq(cred->gid, cred->fsgid))
62308+ return 1;
62309+
62310+ return 0;
62311+}
62312+
62313+extern int gr_fake_force_sig(int sig, struct task_struct *t);
62314+
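+/*
+ * Anti-bruteforce response to crash signals: once a subject with the
+ * RES_CRASH resource reaches rlim_cur crashes inside its expiry window,
+ * a crashing setuid/setgid process gets its (non-root) uid banned and all
+ * of that uid's tasks killed; otherwise every thread running the same
+ * subject and binary is killed.
+ */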
62315+void
62316+gr_handle_crash(struct task_struct *task, const int sig)
62317+{
62318+ struct acl_subject_label *curr;
62319+ struct task_struct *tsk, *tsk2;
62320+ const struct cred *cred;
62321+ const struct cred *cred2;
62322+
62323+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
62324+ return;
62325+
62326+ if (unlikely(!gr_acl_is_enabled()))
62327+ return;
62328+
62329+ curr = task->acl;
62330+
62331+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
62332+ return;
62333+
62334+ if (time_before_eq(curr->expires, get_seconds())) {
62335+ curr->expires = 0;
62336+ curr->crashes = 0;
62337+ }
62338+
62339+ curr->crashes++;
62340+
62341+ if (!curr->expires)
62342+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
62343+
62344+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62345+ time_after(curr->expires, get_seconds())) {
62346+ rcu_read_lock();
62347+ cred = __task_cred(task);
62348+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
62349+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62350+ spin_lock(&gr_uid_lock);
62351+ gr_insert_uid(cred->uid, curr->expires);
62352+ spin_unlock(&gr_uid_lock);
62353+ curr->expires = 0;
62354+ curr->crashes = 0;
62355+ read_lock(&tasklist_lock);
62356+ do_each_thread(tsk2, tsk) {
62357+ cred2 = __task_cred(tsk);
62358+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
62359+ gr_fake_force_sig(SIGKILL, tsk);
62360+ } while_each_thread(tsk2, tsk);
62361+ read_unlock(&tasklist_lock);
62362+ } else {
62363+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
62364+ read_lock(&tasklist_lock);
62365+ read_lock(&grsec_exec_file_lock);
62366+ do_each_thread(tsk2, tsk) {
62367+ if (likely(tsk != task)) {
62368+ // if this thread has the same subject as the one that triggered
62369+ // RES_CRASH and it's the same binary, kill it
62370+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
62371+ gr_fake_force_sig(SIGKILL, tsk);
62372+ }
62373+ } while_each_thread(tsk2, tsk);
62374+ read_unlock(&grsec_exec_file_lock);
62375+ read_unlock(&tasklist_lock);
62376+ }
62377+ rcu_read_unlock();
62378+ }
62379+
62380+ return;
62381+}
62382+
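+/*
+ * Returns 1 to block exec of a binary whose subject is still inside an
+ * active RES_CRASH ban window; an expired window resets the counters.
+ */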
62383+int
62384+gr_check_crash_exec(const struct file *filp)
62385+{
62386+ struct acl_subject_label *curr;
62387+
62388+ if (unlikely(!gr_acl_is_enabled()))
62389+ return 0;
62390+
62391+ read_lock(&gr_inode_lock);
62392+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
62393+ __get_dev(filp->f_path.dentry),
62394+ current->role);
62395+ read_unlock(&gr_inode_lock);
62396+
62397+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
62398+ (!curr->crashes && !curr->expires))
62399+ return 0;
62400+
62401+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
62402+ time_after(curr->expires, get_seconds()))
62403+ return 1;
62404+ else if (time_before_eq(curr->expires, get_seconds())) {
62405+ curr->crashes = 0;
62406+ curr->expires = 0;
62407+ }
62408+
62409+ return 0;
62410+}
62411+
62412+void
62413+gr_handle_alertkill(struct task_struct *task)
62414+{
62415+ struct acl_subject_label *curracl;
62416+ __u32 curr_ip;
62417+ struct task_struct *p, *p2;
62418+
62419+ if (unlikely(!gr_acl_is_enabled()))
62420+ return;
62421+
62422+ curracl = task->acl;
62423+ curr_ip = task->signal->curr_ip;
62424+
62425+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
62426+ read_lock(&tasklist_lock);
62427+ do_each_thread(p2, p) {
62428+ if (p->signal->curr_ip == curr_ip)
62429+ gr_fake_force_sig(SIGKILL, p);
62430+ } while_each_thread(p2, p);
62431+ read_unlock(&tasklist_lock);
62432+ } else if (curracl->mode & GR_KILLPROC)
62433+ gr_fake_force_sig(SIGKILL, task);
62434+
62435+ return;
62436+}
62437diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
62438new file mode 100644
62439index 0000000..98011b0
62440--- /dev/null
62441+++ b/grsecurity/gracl_shm.c
62442@@ -0,0 +1,40 @@
62443+#include <linux/kernel.h>
62444+#include <linux/mm.h>
62445+#include <linux/sched.h>
62446+#include <linux/file.h>
62447+#include <linux/ipc.h>
62448+#include <linux/gracl.h>
62449+#include <linux/grsecurity.h>
62450+#include <linux/grinternal.h>
62451+
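+/*
+ * Protected-shm check: deny (return 0) attaching a segment whose creating
+ * or last-attaching task is still alive, has GR_PROTSHM in its subject,
+ * and runs under a different subject than the caller; the start-time test
+ * guards against pid reuse.
+ */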
62452+int
62453+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62454+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
62455+{
62456+ struct task_struct *task;
62457+
62458+ if (!gr_acl_is_enabled())
62459+ return 1;
62460+
62461+ rcu_read_lock();
62462+ read_lock(&tasklist_lock);
62463+
62464+ task = find_task_by_vpid(shm_cprid);
62465+
62466+ if (unlikely(!task))
62467+ task = find_task_by_vpid(shm_lapid);
62468+
62469+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
62470+ (task_pid_nr(task) == shm_lapid)) &&
62471+ (task->acl->mode & GR_PROTSHM) &&
62472+ (task->acl != current->acl))) {
62473+ read_unlock(&tasklist_lock);
62474+ rcu_read_unlock();
62475+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
62476+ return 0;
62477+ }
62478+ read_unlock(&tasklist_lock);
62479+ rcu_read_unlock();
62480+
62481+ return 1;
62482+}
62483diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
62484new file mode 100644
62485index 0000000..bc0be01
62486--- /dev/null
62487+++ b/grsecurity/grsec_chdir.c
62488@@ -0,0 +1,19 @@
62489+#include <linux/kernel.h>
62490+#include <linux/sched.h>
62491+#include <linux/fs.h>
62492+#include <linux/file.h>
62493+#include <linux/grsecurity.h>
62494+#include <linux/grinternal.h>
62495+
62496+void
62497+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
62498+{
62499+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
62500+ if ((grsec_enable_chdir && grsec_enable_group &&
62501+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
62502+ !grsec_enable_group)) {
62503+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
62504+ }
62505+#endif
62506+ return;
62507+}
62508diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
62509new file mode 100644
62510index 0000000..6d2de57
62511--- /dev/null
62512+++ b/grsecurity/grsec_chroot.c
62513@@ -0,0 +1,357 @@
62514+#include <linux/kernel.h>
62515+#include <linux/module.h>
62516+#include <linux/sched.h>
62517+#include <linux/file.h>
62518+#include <linux/fs.h>
62519+#include <linux/mount.h>
62520+#include <linux/types.h>
62521+#include "../fs/mount.h"
62522+#include <linux/grsecurity.h>
62523+#include <linux/grinternal.h>
62524+
62525+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
62526+{
62527+#ifdef CONFIG_GRKERNSEC
62528+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
62529+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root)
62530+ task->gr_is_chrooted = 1;
62531+ else
62532+ task->gr_is_chrooted = 0;
62533+
62534+ task->gr_chroot_dentry = path->dentry;
62535+#endif
62536+ return;
62537+}
62538+
62539+void gr_clear_chroot_entries(struct task_struct *task)
62540+{
62541+#ifdef CONFIG_GRKERNSEC
62542+ task->gr_is_chrooted = 0;
62543+ task->gr_chroot_dentry = NULL;
62544+#endif
62545+ return;
62546+}
62547+
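+/*
+ * Deny a unix-socket connect from inside a chroot to a peer process that
+ * does not share the caller's root.
+ */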
62548+int
62549+gr_handle_chroot_unix(const pid_t pid)
62550+{
62551+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
62552+ struct task_struct *p;
62553+
62554+ if (unlikely(!grsec_enable_chroot_unix))
62555+ return 1;
62556+
62557+ if (likely(!proc_is_chrooted(current)))
62558+ return 1;
62559+
62560+ rcu_read_lock();
62561+ read_lock(&tasklist_lock);
62562+ p = find_task_by_vpid_unrestricted(pid);
62563+ if (unlikely(p && !have_same_root(current, p))) {
62564+ read_unlock(&tasklist_lock);
62565+ rcu_read_unlock();
62566+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
62567+ return 0;
62568+ }
62569+ read_unlock(&tasklist_lock);
62570+ rcu_read_unlock();
62571+#endif
62572+ return 1;
62573+}
62574+
62575+int
62576+gr_handle_chroot_nice(void)
62577+{
62578+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62579+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
62580+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
62581+ return -EPERM;
62582+ }
62583+#endif
62584+ return 0;
62585+}
62586+
62587+int
62588+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
62589+{
62590+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
62591+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
62592+ && proc_is_chrooted(current)) {
62593+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
62594+ return -EACCES;
62595+ }
62596+#endif
62597+ return 0;
62598+}
62599+
62600+int
62601+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
62602+{
62603+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62604+ struct task_struct *p;
62605+ int ret = 0;
62606+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
62607+ return ret;
62608+
62609+ read_lock(&tasklist_lock);
62610+ do_each_pid_task(pid, type, p) {
62611+ if (!have_same_root(current, p)) {
62612+ ret = 1;
62613+ goto out;
62614+ }
62615+ } while_each_pid_task(pid, type, p);
62616+out:
62617+ read_unlock(&tasklist_lock);
62618+ return ret;
62619+#endif
62620+ return 0;
62621+}
62622+
62623+int
62624+gr_pid_is_chrooted(struct task_struct *p)
62625+{
62626+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62627+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
62628+ return 0;
62629+
62630+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
62631+ !have_same_root(current, p)) {
62632+ return 1;
62633+ }
62634+#endif
62635+ return 0;
62636+}
62637+
62638+EXPORT_SYMBOL(gr_pid_is_chrooted);
62639+
62640+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
62641+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
62642+{
62643+ struct path path, currentroot;
62644+ int ret = 0;
62645+
62646+ path.dentry = (struct dentry *)u_dentry;
62647+ path.mnt = (struct vfsmount *)u_mnt;
62648+ get_fs_root(current->fs, &currentroot);
62649+ if (path_is_under(&path, &currentroot))
62650+ ret = 1;
62651+ path_put(&currentroot);
62652+
62653+ return ret;
62654+}
62655+#endif
62656+
62657+int
62658+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
62659+{
62660+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
62661+ if (!grsec_enable_chroot_fchdir)
62662+ return 1;
62663+
62664+ if (!proc_is_chrooted(current))
62665+ return 1;
62666+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
62667+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
62668+ return 0;
62669+ }
62670+#endif
62671+ return 1;
62672+}
62673+
62674+int
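+/*
+ * Inside a chroot, only allow attaching shm segments created by tasks
+ * sharing the caller's root; if the creator is gone (or its pid was
+ * reused), fall back to checking the last attacher's root.
+ */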
62675+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62676+ const time_t shm_createtime)
62677+{
62678+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
62679+ struct task_struct *p;
62680+ time_t starttime;
62681+
62682+ if (unlikely(!grsec_enable_chroot_shmat))
62683+ return 1;
62684+
62685+ if (likely(!proc_is_chrooted(current)))
62686+ return 1;
62687+
62688+ rcu_read_lock();
62689+ read_lock(&tasklist_lock);
62690+
62691+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
62692+ starttime = p->start_time.tv_sec;
62693+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
62694+ if (have_same_root(current, p)) {
62695+ goto allow;
62696+ } else {
62697+ read_unlock(&tasklist_lock);
62698+ rcu_read_unlock();
62699+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62700+ return 0;
62701+ }
62702+ }
62703+ /* creator exited, pid reuse, fall through to next check */
62704+ }
62705+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
62706+ if (unlikely(!have_same_root(current, p))) {
62707+ read_unlock(&tasklist_lock);
62708+ rcu_read_unlock();
62709+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62710+ return 0;
62711+ }
62712+ }
62713+
62714+allow:
62715+ read_unlock(&tasklist_lock);
62716+ rcu_read_unlock();
62717+#endif
62718+ return 1;
62719+}
62720+
62721+void
62722+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
62723+{
62724+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
62725+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
62726+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
62727+#endif
62728+ return;
62729+}
62730+
62731+int
62732+gr_handle_chroot_mknod(const struct dentry *dentry,
62733+ const struct vfsmount *mnt, const int mode)
62734+{
62735+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
62736+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
62737+ proc_is_chrooted(current)) {
62738+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
62739+ return -EPERM;
62740+ }
62741+#endif
62742+ return 0;
62743+}
62744+
62745+int
62746+gr_handle_chroot_mount(const struct dentry *dentry,
62747+ const struct vfsmount *mnt, const char *dev_name)
62748+{
62749+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
62750+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
62751+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
62752+ return -EPERM;
62753+ }
62754+#endif
62755+ return 0;
62756+}
62757+
62758+int
62759+gr_handle_chroot_pivot(void)
62760+{
62761+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
62762+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
62763+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
62764+ return -EPERM;
62765+ }
62766+#endif
62767+ return 0;
62768+}
62769+
62770+int
62771+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
62772+{
62773+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
62774+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
62775+ !gr_is_outside_chroot(dentry, mnt)) {
62776+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
62777+ return -EPERM;
62778+ }
62779+#endif
62780+ return 0;
62781+}
62782+
62783+extern const char *captab_log[];
62784+extern int captab_log_entries;
62785+
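+/*
+ * Mask capabilities inside a chroot: caps raised in GR_CHROOT_CAPS are
+ * denied, with a log entry only when the cap was actually effective.
+ */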
62786+int
62787+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
62788+{
62789+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62790+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
62791+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62792+ if (cap_raised(chroot_caps, cap)) {
62793+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
62794+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
62795+ }
62796+ return 0;
62797+ }
62798+ }
62799+#endif
62800+ return 1;
62801+}
62802+
62803+int
62804+gr_chroot_is_capable(const int cap)
62805+{
62806+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62807+ return gr_task_chroot_is_capable(current, current_cred(), cap);
62808+#endif
62809+ return 1;
62810+}
62811+
62812+int
62813+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
62814+{
62815+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62816+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
62817+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62818+ if (cap_raised(chroot_caps, cap)) {
62819+ return 0;
62820+ }
62821+ }
62822+#endif
62823+ return 1;
62824+}
62825+
62826+int
62827+gr_chroot_is_capable_nolog(const int cap)
62828+{
62829+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62830+ return gr_task_chroot_is_capable_nolog(current, cap);
62831+#endif
62832+ return 1;
62833+}
62834+
62835+int
62836+gr_handle_chroot_sysctl(const int op)
62837+{
62838+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
62839+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
62840+ proc_is_chrooted(current))
62841+ return -EACCES;
62842+#endif
62843+ return 0;
62844+}
62845+
62846+void
62847+gr_handle_chroot_chdir(struct path *path)
62848+{
62849+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
62850+ if (grsec_enable_chroot_chdir)
62851+ set_fs_pwd(current->fs, path);
62852+#endif
62853+ return;
62854+}
62855+
62856+int
62857+gr_handle_chroot_chmod(const struct dentry *dentry,
62858+ const struct vfsmount *mnt, const int mode)
62859+{
62860+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
62861+ /* allow chmod +s on directories, but not files */
62862+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
62863+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
62864+ proc_is_chrooted(current)) {
62865+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
62866+ return -EPERM;
62867+ }
62868+#endif
62869+ return 0;
62870+}
62871diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
62872new file mode 100644
62873index 0000000..207d409
62874--- /dev/null
62875+++ b/grsecurity/grsec_disabled.c
62876@@ -0,0 +1,434 @@
62877+#include <linux/kernel.h>
62878+#include <linux/module.h>
62879+#include <linux/sched.h>
62880+#include <linux/file.h>
62881+#include <linux/fs.h>
62882+#include <linux/kdev_t.h>
62883+#include <linux/net.h>
62884+#include <linux/in.h>
62885+#include <linux/ip.h>
62886+#include <linux/skbuff.h>
62887+#include <linux/sysctl.h>
62888+
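+/*
+ * No-op stubs for the grsecurity hooks, built when CONFIG_GRKERNSEC is
+ * disabled so that callers need no #ifdefs.
+ */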
62889+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62890+void
62891+pax_set_initial_flags(struct linux_binprm *bprm)
62892+{
62893+ return;
62894+}
62895+#endif
62896+
62897+#ifdef CONFIG_SYSCTL
62898+__u32
62899+gr_handle_sysctl(const struct ctl_table * table, const int op)
62900+{
62901+ return 0;
62902+}
62903+#endif
62904+
62905+#ifdef CONFIG_TASKSTATS
62906+int gr_is_taskstats_denied(int pid)
62907+{
62908+ return 0;
62909+}
62910+#endif
62911+
62912+int
62913+gr_acl_is_enabled(void)
62914+{
62915+ return 0;
62916+}
62917+
62918+void
62919+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
62920+{
62921+ return;
62922+}
62923+
62924+int
62925+gr_handle_rawio(const struct inode *inode)
62926+{
62927+ return 0;
62928+}
62929+
62930+void
62931+gr_acl_handle_psacct(struct task_struct *task, const long code)
62932+{
62933+ return;
62934+}
62935+
62936+int
62937+gr_handle_ptrace(struct task_struct *task, const long request)
62938+{
62939+ return 0;
62940+}
62941+
62942+int
62943+gr_handle_proc_ptrace(struct task_struct *task)
62944+{
62945+ return 0;
62946+}
62947+
62948+int
62949+gr_set_acls(const int type)
62950+{
62951+ return 0;
62952+}
62953+
62954+int
62955+gr_check_hidden_task(const struct task_struct *tsk)
62956+{
62957+ return 0;
62958+}
62959+
62960+int
62961+gr_check_protected_task(const struct task_struct *task)
62962+{
62963+ return 0;
62964+}
62965+
62966+int
62967+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
62968+{
62969+ return 0;
62970+}
62971+
62972+void
62973+gr_copy_label(struct task_struct *tsk)
62974+{
62975+ return;
62976+}
62977+
62978+void
62979+gr_set_pax_flags(struct task_struct *task)
62980+{
62981+ return;
62982+}
62983+
62984+int
62985+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
62986+ const int unsafe_share)
62987+{
62988+ return 0;
62989+}
62990+
62991+void
62992+gr_handle_delete(const ino_t ino, const dev_t dev)
62993+{
62994+ return;
62995+}
62996+
62997+void
62998+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
62999+{
63000+ return;
63001+}
63002+
63003+void
63004+gr_handle_crash(struct task_struct *task, const int sig)
63005+{
63006+ return;
63007+}
63008+
63009+int
63010+gr_check_crash_exec(const struct file *filp)
63011+{
63012+ return 0;
63013+}
63014+
63015+int
63016+gr_check_crash_uid(const kuid_t uid)
63017+{
63018+ return 0;
63019+}
63020+
63021+void
63022+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
63023+ struct dentry *old_dentry,
63024+ struct dentry *new_dentry,
63025+ struct vfsmount *mnt, const __u8 replace)
63026+{
63027+ return;
63028+}
63029+
63030+int
63031+gr_search_socket(const int family, const int type, const int protocol)
63032+{
63033+ return 1;
63034+}
63035+
63036+int
63037+gr_search_connectbind(const int mode, const struct socket *sock,
63038+ const struct sockaddr_in *addr)
63039+{
63040+ return 0;
63041+}
63042+
63043+void
63044+gr_handle_alertkill(struct task_struct *task)
63045+{
63046+ return;
63047+}
63048+
63049+__u32
63050+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
63051+{
63052+ return 1;
63053+}
63054+
63055+__u32
63056+gr_acl_handle_hidden_file(const struct dentry * dentry,
63057+ const struct vfsmount * mnt)
63058+{
63059+ return 1;
63060+}
63061+
63062+__u32
63063+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
63064+ int acc_mode)
63065+{
63066+ return 1;
63067+}
63068+
63069+__u32
63070+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
63071+{
63072+ return 1;
63073+}
63074+
63075+__u32
63076+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
63077+{
63078+ return 1;
63079+}
63080+
63081+int
63082+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
63083+ unsigned int *vm_flags)
63084+{
63085+ return 1;
63086+}
63087+
63088+__u32
63089+gr_acl_handle_truncate(const struct dentry * dentry,
63090+ const struct vfsmount * mnt)
63091+{
63092+ return 1;
63093+}
63094+
63095+__u32
63096+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
63097+{
63098+ return 1;
63099+}
63100+
63101+__u32
63102+gr_acl_handle_access(const struct dentry * dentry,
63103+ const struct vfsmount * mnt, const int fmode)
63104+{
63105+ return 1;
63106+}
63107+
63108+__u32
63109+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
63110+ umode_t *mode)
63111+{
63112+ return 1;
63113+}
63114+
63115+__u32
63116+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
63117+{
63118+ return 1;
63119+}
63120+
63121+__u32
63122+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
63123+{
63124+ return 1;
63125+}
63126+
63127+void
63128+grsecurity_init(void)
63129+{
63130+ return;
63131+}
63132+
63133+umode_t gr_acl_umask(void)
63134+{
63135+ return 0;
63136+}
63137+
63138+__u32
63139+gr_acl_handle_mknod(const struct dentry * new_dentry,
63140+ const struct dentry * parent_dentry,
63141+ const struct vfsmount * parent_mnt,
63142+ const int mode)
63143+{
63144+ return 1;
63145+}
63146+
63147+__u32
63148+gr_acl_handle_mkdir(const struct dentry * new_dentry,
63149+ const struct dentry * parent_dentry,
63150+ const struct vfsmount * parent_mnt)
63151+{
63152+ return 1;
63153+}
63154+
63155+__u32
63156+gr_acl_handle_symlink(const struct dentry * new_dentry,
63157+ const struct dentry * parent_dentry,
63158+ const struct vfsmount * parent_mnt, const struct filename *from)
63159+{
63160+ return 1;
63161+}
63162+
63163+__u32
63164+gr_acl_handle_link(const struct dentry * new_dentry,
63165+ const struct dentry * parent_dentry,
63166+ const struct vfsmount * parent_mnt,
63167+ const struct dentry * old_dentry,
63168+ const struct vfsmount * old_mnt, const struct filename *to)
63169+{
63170+ return 1;
63171+}
63172+
63173+int
63174+gr_acl_handle_rename(const struct dentry *new_dentry,
63175+ const struct dentry *parent_dentry,
63176+ const struct vfsmount *parent_mnt,
63177+ const struct dentry *old_dentry,
63178+ const struct inode *old_parent_inode,
63179+ const struct vfsmount *old_mnt, const struct filename *newname)
63180+{
63181+ return 0;
63182+}
63183+
63184+int
63185+gr_acl_handle_filldir(const struct file *file, const char *name,
63186+ const int namelen, const ino_t ino)
63187+{
63188+ return 1;
63189+}
63190+
63191+int
63192+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
63193+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
63194+{
63195+ return 1;
63196+}
63197+
63198+int
63199+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
63200+{
63201+ return 0;
63202+}
63203+
63204+int
63205+gr_search_accept(const struct socket *sock)
63206+{
63207+ return 0;
63208+}
63209+
63210+int
63211+gr_search_listen(const struct socket *sock)
63212+{
63213+ return 0;
63214+}
63215+
63216+int
63217+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
63218+{
63219+ return 0;
63220+}
63221+
63222+__u32
63223+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
63224+{
63225+ return 1;
63226+}
63227+
63228+__u32
63229+gr_acl_handle_creat(const struct dentry * dentry,
63230+ const struct dentry * p_dentry,
63231+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
63232+ const int imode)
63233+{
63234+ return 1;
63235+}
63236+
63237+void
63238+gr_acl_handle_exit(void)
63239+{
63240+ return;
63241+}
63242+
63243+int
63244+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
63245+{
63246+ return 1;
63247+}
63248+
63249+void
63250+gr_set_role_label(const kuid_t uid, const kgid_t gid)
63251+{
63252+ return;
63253+}
63254+
63255+int
63256+gr_acl_handle_procpidmem(const struct task_struct *task)
63257+{
63258+ return 0;
63259+}
63260+
63261+int
63262+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
63263+{
63264+ return 0;
63265+}
63266+
63267+int
63268+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
63269+{
63270+ return 0;
63271+}
63272+
63273+void
63274+gr_set_kernel_label(struct task_struct *task)
63275+{
63276+ return;
63277+}
63278+
63279+int
63280+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
63281+{
63282+ return 0;
63283+}
63284+
63285+int
63286+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
63287+{
63288+ return 0;
63289+}
63290+
63291+int gr_acl_enable_at_secure(void)
63292+{
63293+ return 0;
63294+}
63295+
63296+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
63297+{
63298+ return dentry->d_inode->i_sb->s_dev;
63299+}
63300+
63301+void gr_put_exec_file(struct task_struct *task)
63302+{
63303+ return;
63304+}
63305+
63306+EXPORT_SYMBOL(gr_set_kernel_label);
63307+#ifdef CONFIG_SECURITY
63308+EXPORT_SYMBOL(gr_check_user_change);
63309+EXPORT_SYMBOL(gr_check_group_change);
63310+#endif
63311diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
63312new file mode 100644
63313index 0000000..abfa971
63314--- /dev/null
63315+++ b/grsecurity/grsec_exec.c
63316@@ -0,0 +1,174 @@
63317+#include <linux/kernel.h>
63318+#include <linux/sched.h>
63319+#include <linux/file.h>
63320+#include <linux/binfmts.h>
63321+#include <linux/fs.h>
63322+#include <linux/types.h>
63323+#include <linux/grdefs.h>
63324+#include <linux/grsecurity.h>
63325+#include <linux/grinternal.h>
63326+#include <linux/capability.h>
63327+#include <linux/module.h>
63328+
63329+#include <asm/uaccess.h>
63330+
63331+#ifdef CONFIG_GRKERNSEC_EXECLOG
63332+static char gr_exec_arg_buf[132];
63333+static DEFINE_MUTEX(gr_exec_arg_mutex);
63334+#endif
63335+
63336+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
63337+
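+/*
+ * Exec logging: copy up to 128 bytes of argv into a shared static buffer
+ * under gr_exec_arg_mutex, rewrite unprintable characters to spaces, and
+ * log the binary path together with the sanitized argument string.
+ */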
63338+void
63339+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
63340+{
63341+#ifdef CONFIG_GRKERNSEC_EXECLOG
63342+ char *grarg = gr_exec_arg_buf;
63343+ unsigned int i, x, execlen = 0;
63344+ char c;
63345+
63346+ if (!((grsec_enable_execlog && grsec_enable_group &&
63347+ in_group_p(grsec_audit_gid))
63348+ || (grsec_enable_execlog && !grsec_enable_group)))
63349+ return;
63350+
63351+ mutex_lock(&gr_exec_arg_mutex);
63352+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
63353+
63354+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
63355+ const char __user *p;
63356+ unsigned int len;
63357+
63358+ p = get_user_arg_ptr(argv, i);
63359+ if (IS_ERR(p))
63360+ goto log;
63361+
63362+ len = strnlen_user(p, 128 - execlen);
63363+ if (len > 128 - execlen)
63364+ len = 128 - execlen;
63365+ else if (len > 0)
63366+ len--;
63367+ if (copy_from_user(grarg + execlen, p, len))
63368+ goto log;
63369+
63370+ /* rewrite unprintable characters */
63371+ for (x = 0; x < len; x++) {
63372+ c = *(grarg + execlen + x);
63373+ if (c < 32 || c > 126)
63374+ *(grarg + execlen + x) = ' ';
63375+ }
63376+
63377+ execlen += len;
63378+ *(grarg + execlen) = ' ';
63379+ *(grarg + execlen + 1) = '\0';
63380+ execlen++;
63381+ }
63382+
63383+ log:
63384+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
63385+ bprm->file->f_path.mnt, grarg);
63386+ mutex_unlock(&gr_exec_arg_mutex);
63387+#endif
63388+ return;
63389+}
63390+
63391+#ifdef CONFIG_GRKERNSEC
63392+extern int gr_acl_is_capable(const int cap);
63393+extern int gr_acl_is_capable_nolog(const int cap);
63394+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
63395+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
63396+extern int gr_chroot_is_capable(const int cap);
63397+extern int gr_chroot_is_capable_nolog(const int cap);
63398+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
63399+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
63400+#endif
63401+
63402+const char *captab_log[] = {
63403+ "CAP_CHOWN",
63404+ "CAP_DAC_OVERRIDE",
63405+ "CAP_DAC_READ_SEARCH",
63406+ "CAP_FOWNER",
63407+ "CAP_FSETID",
63408+ "CAP_KILL",
63409+ "CAP_SETGID",
63410+ "CAP_SETUID",
63411+ "CAP_SETPCAP",
63412+ "CAP_LINUX_IMMUTABLE",
63413+ "CAP_NET_BIND_SERVICE",
63414+ "CAP_NET_BROADCAST",
63415+ "CAP_NET_ADMIN",
63416+ "CAP_NET_RAW",
63417+ "CAP_IPC_LOCK",
63418+ "CAP_IPC_OWNER",
63419+ "CAP_SYS_MODULE",
63420+ "CAP_SYS_RAWIO",
63421+ "CAP_SYS_CHROOT",
63422+ "CAP_SYS_PTRACE",
63423+ "CAP_SYS_PACCT",
63424+ "CAP_SYS_ADMIN",
63425+ "CAP_SYS_BOOT",
63426+ "CAP_SYS_NICE",
63427+ "CAP_SYS_RESOURCE",
63428+ "CAP_SYS_TIME",
63429+ "CAP_SYS_TTY_CONFIG",
63430+ "CAP_MKNOD",
63431+ "CAP_LEASE",
63432+ "CAP_AUDIT_WRITE",
63433+ "CAP_AUDIT_CONTROL",
63434+ "CAP_SETFCAP",
63435+ "CAP_MAC_OVERRIDE",
63436+ "CAP_MAC_ADMIN",
63437+ "CAP_SYSLOG",
63438+ "CAP_WAKE_ALARM"
63439+};
63440+
63441+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
63442+
63443+int gr_is_capable(const int cap)
63444+{
63445+#ifdef CONFIG_GRKERNSEC
63446+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
63447+ return 1;
63448+ return 0;
63449+#else
63450+ return 1;
63451+#endif
63452+}
63453+
63454+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
63455+{
63456+#ifdef CONFIG_GRKERNSEC
63457+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
63458+ return 1;
63459+ return 0;
63460+#else
63461+ return 1;
63462+#endif
63463+}
63464+
63465+int gr_is_capable_nolog(const int cap)
63466+{
63467+#ifdef CONFIG_GRKERNSEC
63468+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
63469+ return 1;
63470+ return 0;
63471+#else
63472+ return 1;
63473+#endif
63474+}
63475+
63476+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
63477+{
63478+#ifdef CONFIG_GRKERNSEC
63479+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
63480+ return 1;
63481+ return 0;
63482+#else
63483+ return 1;
63484+#endif
63485+}
63486+
63487+EXPORT_SYMBOL(gr_is_capable);
63488+EXPORT_SYMBOL(gr_is_capable_nolog);
63489+EXPORT_SYMBOL(gr_task_is_capable);
63490+EXPORT_SYMBOL(gr_task_is_capable_nolog);
63491diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
63492new file mode 100644
63493index 0000000..06cc6ea
63494--- /dev/null
63495+++ b/grsecurity/grsec_fifo.c
63496@@ -0,0 +1,24 @@
63497+#include <linux/kernel.h>
63498+#include <linux/sched.h>
63499+#include <linux/fs.h>
63500+#include <linux/file.h>
63501+#include <linux/grinternal.h>
63502+
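+/*
+ * FIFO hardening: refuse to open (without O_EXCL) a FIFO in a sticky
+ * directory when the FIFO is owned by neither the directory's owner nor
+ * the opener; the denial is logged only if the open would otherwise have
+ * passed the normal permission check.
+ */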
63503+int
63504+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
63505+ const struct dentry *dir, const int flag, const int acc_mode)
63506+{
63507+#ifdef CONFIG_GRKERNSEC_FIFO
63508+ const struct cred *cred = current_cred();
63509+
63510+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
63511+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
63512+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
63513+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
63514+ if (!inode_permission(dentry->d_inode, acc_mode))
63515+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
63516+ return -EACCES;
63517+ }
63518+#endif
63519+ return 0;
63520+}
63521diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
63522new file mode 100644
63523index 0000000..8ca18bf
63524--- /dev/null
63525+++ b/grsecurity/grsec_fork.c
63526@@ -0,0 +1,23 @@
63527+#include <linux/kernel.h>
63528+#include <linux/sched.h>
63529+#include <linux/grsecurity.h>
63530+#include <linux/grinternal.h>
63531+#include <linux/errno.h>
63532+
63533+void
63534+gr_log_forkfail(const int retval)
63535+{
63536+#ifdef CONFIG_GRKERNSEC_FORKFAIL
63537+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
63538+ switch (retval) {
63539+ case -EAGAIN:
63540+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
63541+ break;
63542+ case -ENOMEM:
63543+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
63544+ break;
63545+ }
63546+ }
63547+#endif
63548+ return;
63549+}
63550diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
63551new file mode 100644
63552index 0000000..a862e9f
63553--- /dev/null
63554+++ b/grsecurity/grsec_init.c
63555@@ -0,0 +1,283 @@
63556+#include <linux/kernel.h>
63557+#include <linux/sched.h>
63558+#include <linux/mm.h>
63559+#include <linux/gracl.h>
63560+#include <linux/slab.h>
63561+#include <linux/vmalloc.h>
63562+#include <linux/percpu.h>
63563+#include <linux/module.h>
63564+
63565+int grsec_enable_ptrace_readexec;
63566+int grsec_enable_setxid;
63567+int grsec_enable_symlinkown;
63568+kgid_t grsec_symlinkown_gid;
63569+int grsec_enable_brute;
63570+int grsec_enable_link;
63571+int grsec_enable_dmesg;
63572+int grsec_enable_harden_ptrace;
63573+int grsec_enable_fifo;
63574+int grsec_enable_execlog;
63575+int grsec_enable_signal;
63576+int grsec_enable_forkfail;
63577+int grsec_enable_audit_ptrace;
63578+int grsec_enable_time;
63579+int grsec_enable_audit_textrel;
63580+int grsec_enable_group;
63581+kgid_t grsec_audit_gid;
63582+int grsec_enable_chdir;
63583+int grsec_enable_mount;
63584+int grsec_enable_rofs;
63585+int grsec_enable_chroot_findtask;
63586+int grsec_enable_chroot_mount;
63587+int grsec_enable_chroot_shmat;
63588+int grsec_enable_chroot_fchdir;
63589+int grsec_enable_chroot_double;
63590+int grsec_enable_chroot_pivot;
63591+int grsec_enable_chroot_chdir;
63592+int grsec_enable_chroot_chmod;
63593+int grsec_enable_chroot_mknod;
63594+int grsec_enable_chroot_nice;
63595+int grsec_enable_chroot_execlog;
63596+int grsec_enable_chroot_caps;
63597+int grsec_enable_chroot_sysctl;
63598+int grsec_enable_chroot_unix;
63599+int grsec_enable_tpe;
63600+kgid_t grsec_tpe_gid;
63601+int grsec_enable_blackhole;
63602+#ifdef CONFIG_IPV6_MODULE
63603+EXPORT_SYMBOL(grsec_enable_blackhole);
63604+#endif
63605+int grsec_lastack_retries;
63606+int grsec_enable_tpe_all;
63607+int grsec_enable_tpe_invert;
63608+int grsec_enable_socket_all;
63609+kgid_t grsec_socket_all_gid;
63610+int grsec_enable_socket_client;
63611+kgid_t grsec_socket_client_gid;
63612+int grsec_enable_socket_server;
63613+kgid_t grsec_socket_server_gid;
63614+int grsec_resource_logging;
63615+int grsec_disable_privio;
63616+int grsec_enable_log_rwxmaps;
63617+int grsec_lock;
63618+
63619+DEFINE_SPINLOCK(grsec_alert_lock);
63620+unsigned long grsec_alert_wtime = 0;
63621+unsigned long grsec_alert_fyet = 0;
63622+
63623+DEFINE_SPINLOCK(grsec_audit_lock);
63624+
63625+DEFINE_RWLOCK(grsec_exec_file_lock);
63626+
63627+char *gr_shared_page[4];
63628+
63629+char *gr_alert_log_fmt;
63630+char *gr_audit_log_fmt;
63631+char *gr_alert_log_buf;
63632+char *gr_audit_log_buf;
63633+
63634+extern struct gr_arg *gr_usermode;
63635+extern unsigned char *gr_system_salt;
63636+extern unsigned char *gr_system_sum;
63637+
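+/*
+ * Boot-time setup: allocate the per-cpu shared pages, the alert/audit
+ * format and log buffers, and the authentication scratch buffers, then
+ * seed the grsec_enable_* toggles from their compile-time CONFIG defaults
+ * when sysctl control is absent or forced on.
+ */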
63638+void __init
63639+grsecurity_init(void)
63640+{
63641+ int j;
63642+ /* create the per-cpu shared pages */
63643+
63644+#ifdef CONFIG_X86
63645+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
63646+#endif
63647+
63648+ for (j = 0; j < 4; j++) {
63649+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
63650+ if (gr_shared_page[j] == NULL) {
63651+ panic("Unable to allocate grsecurity shared page");
63652+ return;
63653+ }
63654+ }
63655+
63656+ /* allocate log buffers */
63657+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
63658+ if (!gr_alert_log_fmt) {
63659+ panic("Unable to allocate grsecurity alert log format buffer");
63660+ return;
63661+ }
63662+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
63663+ if (!gr_audit_log_fmt) {
63664+ panic("Unable to allocate grsecurity audit log format buffer");
63665+ return;
63666+ }
63667+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63668+ if (!gr_alert_log_buf) {
63669+ panic("Unable to allocate grsecurity alert log buffer");
63670+ return;
63671+ }
63672+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63673+ if (!gr_audit_log_buf) {
63674+ panic("Unable to allocate grsecurity audit log buffer");
63675+ return;
63676+ }
63677+
63678+ /* allocate memory for authentication structure */
63679+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
63680+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
63681+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
63682+
63683+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
63684+ panic("Unable to allocate grsecurity authentication structure");
63685+ return;
63686+ }
63687+
63689+#ifdef CONFIG_GRKERNSEC_IO
63690+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
63691+ grsec_disable_privio = 1;
63692+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63693+ grsec_disable_privio = 1;
63694+#else
63695+ grsec_disable_privio = 0;
63696+#endif
63697+#endif
63698+
63699+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
63700+	/* for backward compatibility, tpe_invert always defaults to on
63701+	 * if enabled in the kernel
63702+	 */
63703+ grsec_enable_tpe_invert = 1;
63704+#endif
63705+
63706+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63707+#ifndef CONFIG_GRKERNSEC_SYSCTL
63708+ grsec_lock = 1;
63709+#endif
63710+
63711+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63712+ grsec_enable_audit_textrel = 1;
63713+#endif
63714+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63715+ grsec_enable_log_rwxmaps = 1;
63716+#endif
63717+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
63718+ grsec_enable_group = 1;
63719+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
63720+#endif
63721+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
63722+ grsec_enable_ptrace_readexec = 1;
63723+#endif
63724+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63725+ grsec_enable_chdir = 1;
63726+#endif
63727+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
63728+ grsec_enable_harden_ptrace = 1;
63729+#endif
63730+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63731+ grsec_enable_mount = 1;
63732+#endif
63733+#ifdef CONFIG_GRKERNSEC_LINK
63734+ grsec_enable_link = 1;
63735+#endif
63736+#ifdef CONFIG_GRKERNSEC_BRUTE
63737+ grsec_enable_brute = 1;
63738+#endif
63739+#ifdef CONFIG_GRKERNSEC_DMESG
63740+ grsec_enable_dmesg = 1;
63741+#endif
63742+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
63743+ grsec_enable_blackhole = 1;
63744+ grsec_lastack_retries = 4;
63745+#endif
63746+#ifdef CONFIG_GRKERNSEC_FIFO
63747+ grsec_enable_fifo = 1;
63748+#endif
63749+#ifdef CONFIG_GRKERNSEC_EXECLOG
63750+ grsec_enable_execlog = 1;
63751+#endif
63752+#ifdef CONFIG_GRKERNSEC_SETXID
63753+ grsec_enable_setxid = 1;
63754+#endif
63755+#ifdef CONFIG_GRKERNSEC_SIGNAL
63756+ grsec_enable_signal = 1;
63757+#endif
63758+#ifdef CONFIG_GRKERNSEC_FORKFAIL
63759+ grsec_enable_forkfail = 1;
63760+#endif
63761+#ifdef CONFIG_GRKERNSEC_TIME
63762+ grsec_enable_time = 1;
63763+#endif
63764+#ifdef CONFIG_GRKERNSEC_RESLOG
63765+ grsec_resource_logging = 1;
63766+#endif
63767+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63768+ grsec_enable_chroot_findtask = 1;
63769+#endif
63770+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63771+ grsec_enable_chroot_unix = 1;
63772+#endif
63773+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63774+ grsec_enable_chroot_mount = 1;
63775+#endif
63776+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63777+ grsec_enable_chroot_fchdir = 1;
63778+#endif
63779+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63780+ grsec_enable_chroot_shmat = 1;
63781+#endif
63782+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63783+ grsec_enable_audit_ptrace = 1;
63784+#endif
63785+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63786+ grsec_enable_chroot_double = 1;
63787+#endif
63788+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63789+ grsec_enable_chroot_pivot = 1;
63790+#endif
63791+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63792+ grsec_enable_chroot_chdir = 1;
63793+#endif
63794+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63795+ grsec_enable_chroot_chmod = 1;
63796+#endif
63797+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63798+ grsec_enable_chroot_mknod = 1;
63799+#endif
63800+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63801+ grsec_enable_chroot_nice = 1;
63802+#endif
63803+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63804+ grsec_enable_chroot_execlog = 1;
63805+#endif
63806+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63807+ grsec_enable_chroot_caps = 1;
63808+#endif
63809+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63810+ grsec_enable_chroot_sysctl = 1;
63811+#endif
63812+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
63813+ grsec_enable_symlinkown = 1;
63814+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
63815+#endif
63816+#ifdef CONFIG_GRKERNSEC_TPE
63817+ grsec_enable_tpe = 1;
63818+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
63819+#ifdef CONFIG_GRKERNSEC_TPE_ALL
63820+ grsec_enable_tpe_all = 1;
63821+#endif
63822+#endif
63823+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63824+ grsec_enable_socket_all = 1;
63825+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
63826+#endif
63827+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63828+ grsec_enable_socket_client = 1;
63829+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
63830+#endif
63831+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63832+ grsec_enable_socket_server = 1;
63833+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
63834+#endif
63835+#endif
63836+
63837+ return;
63838+}
63839diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
63840new file mode 100644
63841index 0000000..5e05e20
63842--- /dev/null
63843+++ b/grsecurity/grsec_link.c
63844@@ -0,0 +1,58 @@
63845+#include <linux/kernel.h>
63846+#include <linux/sched.h>
63847+#include <linux/fs.h>
63848+#include <linux/file.h>
63849+#include <linux/grinternal.h>
63850+
63851+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
63852+{
63853+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
63854+ const struct inode *link_inode = link->dentry->d_inode;
63855+
63856+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
63857+ /* ignore root-owned links, e.g. /proc/self */
63858+ gr_is_global_nonroot(link_inode->i_uid) && target &&
63859+ !uid_eq(link_inode->i_uid, target->i_uid)) {
63860+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
63861+ return 1;
63862+ }
63863+#endif
63864+ return 0;
63865+}
63866+
63867+int
63868+gr_handle_follow_link(const struct inode *parent,
63869+ const struct inode *inode,
63870+ const struct dentry *dentry, const struct vfsmount *mnt)
63871+{
63872+#ifdef CONFIG_GRKERNSEC_LINK
63873+ const struct cred *cred = current_cred();
63874+
63875+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
63876+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
63877+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
63878+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
63879+ return -EACCES;
63880+ }
63881+#endif
63882+ return 0;
63883+}
63884+
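+/*
+ * Hardlink restriction: without CAP_FOWNER, a user may only hardlink
+ * files he owns, or regular non-privileged files he could open
+ * read-write.
+ */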
63885+int
63886+gr_handle_hardlink(const struct dentry *dentry,
63887+ const struct vfsmount *mnt,
63888+ struct inode *inode, const int mode, const struct filename *to)
63889+{
63890+#ifdef CONFIG_GRKERNSEC_LINK
63891+ const struct cred *cred = current_cred();
63892+
63893+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
63894+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
63895+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
63896+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
63897+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
63898+ return -EPERM;
63899+ }
63900+#endif
63901+ return 0;
63902+}
63903diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
63904new file mode 100644
63905index 0000000..7c06085
63906--- /dev/null
63907+++ b/grsecurity/grsec_log.c
63908@@ -0,0 +1,326 @@
63909+#include <linux/kernel.h>
63910+#include <linux/sched.h>
63911+#include <linux/file.h>
63912+#include <linux/tty.h>
63913+#include <linux/fs.h>
63914+#include <linux/grinternal.h>
63915+
63916+#ifdef CONFIG_TREE_PREEMPT_RCU
63917+#define DISABLE_PREEMPT() preempt_disable()
63918+#define ENABLE_PREEMPT() preempt_enable()
63919+#else
63920+#define DISABLE_PREEMPT()
63921+#define ENABLE_PREEMPT()
63922+#endif
63923+
63924+#define BEGIN_LOCKS(x) \
63925+ DISABLE_PREEMPT(); \
63926+ rcu_read_lock(); \
63927+ read_lock(&tasklist_lock); \
63928+ read_lock(&grsec_exec_file_lock); \
63929+ if (x != GR_DO_AUDIT) \
63930+ spin_lock(&grsec_alert_lock); \
63931+ else \
63932+ spin_lock(&grsec_audit_lock)
63933+
63934+#define END_LOCKS(x) \
63935+ if (x != GR_DO_AUDIT) \
63936+ spin_unlock(&grsec_alert_lock); \
63937+ else \
63938+ spin_unlock(&grsec_audit_lock); \
63939+ read_unlock(&grsec_exec_file_lock); \
63940+ read_unlock(&tasklist_lock); \
63941+ rcu_read_unlock(); \
63942+ ENABLE_PREEMPT(); \
63943+ if (x == GR_DONT_AUDIT) \
63944+ gr_handle_alertkill(current)
63945+
63946+enum {
63947+ FLOODING,
63948+ NO_FLOODING
63949+};
63950+
63951+extern char *gr_alert_log_fmt;
63952+extern char *gr_audit_log_fmt;
63953+extern char *gr_alert_log_buf;
63954+extern char *gr_audit_log_buf;
63955+
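+/*
+ * Begin a log line: non-audit alerts are rate-limited to at most
+ * CONFIG_GRKERNSEC_FLOODBURST entries per CONFIG_GRKERNSEC_FLOODTIME
+ * seconds, then the buffer is prefixed with loglevel, source IP and
+ * RBAC role context as available.
+ */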
63956+static int gr_log_start(int audit)
63957+{
63958+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
63959+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
63960+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63961+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
63962+ unsigned long curr_secs = get_seconds();
63963+
63964+ if (audit == GR_DO_AUDIT)
63965+ goto set_fmt;
63966+
63967+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
63968+ grsec_alert_wtime = curr_secs;
63969+ grsec_alert_fyet = 0;
63970+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
63971+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
63972+ grsec_alert_fyet++;
63973+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
63974+ grsec_alert_wtime = curr_secs;
63975+ grsec_alert_fyet++;
63976+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
63977+ return FLOODING;
63978+ }
63979+ else return FLOODING;
63980+
63981+set_fmt:
63982+#endif
63983+ memset(buf, 0, PAGE_SIZE);
63984+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
63985+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
63986+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63987+ } else if (current->signal->curr_ip) {
63988+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
63989+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
63990+ } else if (gr_acl_is_enabled()) {
63991+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
63992+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63993+ } else {
63994+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
63995+ strcpy(buf, fmt);
63996+ }
63997+
63998+ return NO_FLOODING;
63999+}
64000+
64001+static void gr_log_middle(int audit, const char *msg, va_list ap)
64002+ __attribute__ ((format (printf, 2, 0)));
64003+
64004+static void gr_log_middle(int audit, const char *msg, va_list ap)
64005+{
64006+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64007+ unsigned int len = strlen(buf);
64008+
64009+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
64010+
64011+ return;
64012+}
64013+
64014+static void gr_log_middle_varargs(int audit, const char *msg, ...)
64015+ __attribute__ ((format (printf, 2, 3)));
64016+
64017+static void gr_log_middle_varargs(int audit, const char *msg, ...)
64018+{
64019+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64020+ unsigned int len = strlen(buf);
64021+ va_list ap;
64022+
64023+ va_start(ap, msg);
64024+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
64025+ va_end(ap);
64026+
64027+ return;
64028+}
64029+
64030+static void gr_log_end(int audit, int append_default)
64031+{
64032+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
64033+ if (append_default) {
64034+ struct task_struct *task = current;
64035+ struct task_struct *parent = task->real_parent;
64036+ const struct cred *cred = __task_cred(task);
64037+ const struct cred *pcred = __task_cred(parent);
64038+ unsigned int len = strlen(buf);
64039+
64040+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64041+ }
64042+
64043+ printk("%s\n", buf);
64044+
64045+ return;
64046+}
64047+
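+/*
+ * Central log formatter: takes the logging locks, applies flood control,
+ * then builds the message by dispatching on the argtypes descriptor.
+ */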
64048+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
64049+{
64050+ int logtype;
64051+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
64052+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
64053+ void *voidptr = NULL;
64054+ int num1 = 0, num2 = 0;
64055+ unsigned long ulong1 = 0, ulong2 = 0;
64056+ struct dentry *dentry = NULL;
64057+ struct vfsmount *mnt = NULL;
64058+ struct file *file = NULL;
64059+ struct task_struct *task = NULL;
64060+ const struct cred *cred, *pcred;
64061+ va_list ap;
64062+
64063+ BEGIN_LOCKS(audit);
64064+ logtype = gr_log_start(audit);
64065+ if (logtype == FLOODING) {
64066+ END_LOCKS(audit);
64067+ return;
64068+ }
64069+ va_start(ap, argtypes);
64070+ switch (argtypes) {
64071+ case GR_TTYSNIFF:
64072+ task = va_arg(ap, struct task_struct *);
64073+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
64074+ break;
64075+ case GR_SYSCTL_HIDDEN:
64076+ str1 = va_arg(ap, char *);
64077+ gr_log_middle_varargs(audit, msg, result, str1);
64078+ break;
64079+ case GR_RBAC:
64080+ dentry = va_arg(ap, struct dentry *);
64081+ mnt = va_arg(ap, struct vfsmount *);
64082+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
64083+ break;
64084+ case GR_RBAC_STR:
64085+ dentry = va_arg(ap, struct dentry *);
64086+ mnt = va_arg(ap, struct vfsmount *);
64087+ str1 = va_arg(ap, char *);
64088+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
64089+ break;
64090+ case GR_STR_RBAC:
64091+ str1 = va_arg(ap, char *);
64092+ dentry = va_arg(ap, struct dentry *);
64093+ mnt = va_arg(ap, struct vfsmount *);
64094+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
64095+ break;
64096+ case GR_RBAC_MODE2:
64097+ dentry = va_arg(ap, struct dentry *);
64098+ mnt = va_arg(ap, struct vfsmount *);
64099+ str1 = va_arg(ap, char *);
64100+ str2 = va_arg(ap, char *);
64101+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
64102+ break;
64103+ case GR_RBAC_MODE3:
64104+ dentry = va_arg(ap, struct dentry *);
64105+ mnt = va_arg(ap, struct vfsmount *);
64106+ str1 = va_arg(ap, char *);
64107+ str2 = va_arg(ap, char *);
64108+ str3 = va_arg(ap, char *);
64109+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
64110+ break;
64111+ case GR_FILENAME:
64112+ dentry = va_arg(ap, struct dentry *);
64113+ mnt = va_arg(ap, struct vfsmount *);
64114+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
64115+ break;
64116+ case GR_STR_FILENAME:
64117+ str1 = va_arg(ap, char *);
64118+ dentry = va_arg(ap, struct dentry *);
64119+ mnt = va_arg(ap, struct vfsmount *);
64120+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
64121+ break;
64122+ case GR_FILENAME_STR:
64123+ dentry = va_arg(ap, struct dentry *);
64124+ mnt = va_arg(ap, struct vfsmount *);
64125+ str1 = va_arg(ap, char *);
64126+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
64127+ break;
64128+ case GR_FILENAME_TWO_INT:
64129+ dentry = va_arg(ap, struct dentry *);
64130+ mnt = va_arg(ap, struct vfsmount *);
64131+ num1 = va_arg(ap, int);
64132+ num2 = va_arg(ap, int);
64133+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
64134+ break;
64135+ case GR_FILENAME_TWO_INT_STR:
64136+ dentry = va_arg(ap, struct dentry *);
64137+ mnt = va_arg(ap, struct vfsmount *);
64138+ num1 = va_arg(ap, int);
64139+ num2 = va_arg(ap, int);
64140+ str1 = va_arg(ap, char *);
64141+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
64142+ break;
64143+ case GR_TEXTREL:
64144+ file = va_arg(ap, struct file *);
64145+ ulong1 = va_arg(ap, unsigned long);
64146+ ulong2 = va_arg(ap, unsigned long);
64147+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
64148+ break;
64149+ case GR_PTRACE:
64150+ task = va_arg(ap, struct task_struct *);
64151+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
64152+ break;
64153+ case GR_RESOURCE:
64154+ task = va_arg(ap, struct task_struct *);
64155+ cred = __task_cred(task);
64156+ pcred = __task_cred(task->real_parent);
64157+ ulong1 = va_arg(ap, unsigned long);
64158+ str1 = va_arg(ap, char *);
64159+ ulong2 = va_arg(ap, unsigned long);
64160+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64161+ break;
64162+ case GR_CAP:
64163+ task = va_arg(ap, struct task_struct *);
64164+ cred = __task_cred(task);
64165+ pcred = __task_cred(task->real_parent);
64166+ str1 = va_arg(ap, char *);
64167+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64168+ break;
64169+ case GR_SIG:
64170+ str1 = va_arg(ap, char *);
64171+ voidptr = va_arg(ap, void *);
64172+ gr_log_middle_varargs(audit, msg, str1, voidptr);
64173+ break;
64174+ case GR_SIG2:
64175+ task = va_arg(ap, struct task_struct *);
64176+ cred = __task_cred(task);
64177+ pcred = __task_cred(task->real_parent);
64178+ num1 = va_arg(ap, int);
64179+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64180+ break;
64181+ case GR_CRASH1:
64182+ task = va_arg(ap, struct task_struct *);
64183+ cred = __task_cred(task);
64184+ pcred = __task_cred(task->real_parent);
64185+ ulong1 = va_arg(ap, unsigned long);
64186+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
64187+ break;
64188+ case GR_CRASH2:
64189+ task = va_arg(ap, struct task_struct *);
64190+ cred = __task_cred(task);
64191+ pcred = __task_cred(task->real_parent);
64192+ ulong1 = va_arg(ap, unsigned long);
64193+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
64194+ break;
64195+ case GR_RWXMAP:
64196+ file = va_arg(ap, struct file *);
64197+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
64198+ break;
64199+ case GR_PSACCT:
64200+ {
64201+ unsigned int wday, cday;
64202+ __u8 whr, chr;
64203+ __u8 wmin, cmin;
64204+ __u8 wsec, csec;
64205+ char cur_tty[64] = { 0 };
64206+ char parent_tty[64] = { 0 };
64207+
64208+ task = va_arg(ap, struct task_struct *);
64209+ wday = va_arg(ap, unsigned int);
64210+ cday = va_arg(ap, unsigned int);
64211+ whr = va_arg(ap, int);
64212+ chr = va_arg(ap, int);
64213+ wmin = va_arg(ap, int);
64214+ cmin = va_arg(ap, int);
64215+ wsec = va_arg(ap, int);
64216+ csec = va_arg(ap, int);
64217+ ulong1 = va_arg(ap, unsigned long);
64218+ cred = __task_cred(task);
64219+ pcred = __task_cred(task->real_parent);
64220+
64221+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
64222+ }
64223+ break;
64224+ default:
64225+ gr_log_middle(audit, msg, ap);
64226+ }
64227+ va_end(ap);
64228+ // these don't need DEFAULTSECARGS printed on the end
64229+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
64230+ gr_log_end(audit, 0);
64231+ else
64232+ gr_log_end(audit, 1);
64233+ END_LOCKS(audit);
64234+}
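
The logging core above is split into three phases: gr_log_start() performs flood control (the NO_FLOODING return at the top of this hunk), the gr_log_middle*() helpers vsnprintf() into a page-sized per-severity buffer (gr_audit_log_buf or gr_alert_log_buf, capped at PAGE_SIZE - len - 1), and gr_log_end() optionally appends the DEFAULTSECMSG process/credential summary before the single printk(). The argtypes switch defines a small varargs ABI: each case pops its arguments in a fixed order, so every call site must push them in exactly that order. A minimal sketch of a conforming call site, assuming GR_RBAC_STR and an illustrative format string (the real message strings live in the grsecurity message headers, not in this hunk):

    /* Hypothetical caller: GR_RBAC_STR pops (dentry, mnt, str), and the
     * dispatcher prepends the "successful"/"denied" result string, so the
     * format needs three %s conversions. */
    static void example_log_denied_write(struct dentry *dentry, struct vfsmount *mnt)
    {
        gr_log_varargs(GR_DONT_AUDIT, "%s write to %s for label %s",
                       GR_RBAC_STR, dentry, mnt, "example-label");
    }
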
64235diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
64236new file mode 100644
64237index 0000000..f536303
64238--- /dev/null
64239+++ b/grsecurity/grsec_mem.c
64240@@ -0,0 +1,40 @@
64241+#include <linux/kernel.h>
64242+#include <linux/sched.h>
64243+#include <linux/mm.h>
64244+#include <linux/mman.h>
64245+#include <linux/grinternal.h>
64246+
64247+void
64248+gr_handle_ioperm(void)
64249+{
64250+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
64251+ return;
64252+}
64253+
64254+void
64255+gr_handle_iopl(void)
64256+{
64257+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
64258+ return;
64259+}
64260+
64261+void
64262+gr_handle_mem_readwrite(u64 from, u64 to)
64263+{
64264+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
64265+ return;
64266+}
64267+
64268+void
64269+gr_handle_vm86(void)
64270+{
64271+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
64272+ return;
64273+}
64274+
64275+void
64276+gr_log_badprocpid(const char *entry)
64277+{
64278+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
64279+ return;
64280+}
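
grsec_mem.c is a set of one-line funnels: the x86 ioperm/iopl, /dev/mem read-write, vm86, and bad-/proc-pid events all route into the central logger with GR_DONT_AUDIT (the alert buffer). The gr_log_noargs()/gr_log_str()/gr_log_two_u64() helpers are not defined in this hunk; judging from the dispatcher above, they are assumed to be thin macros over gr_log_varargs(), roughly:

    /* Assumed shape only -- the real definitions live in
     * include/linux/grinternal.h elsewhere in this patch. */
    #define gr_log_noargs(audit, msg)    gr_log_varargs(audit, msg, GR_NOARGS)
    #define gr_log_str(audit, msg, str)  gr_log_varargs(audit, msg, GR_ONE_STR, str)
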
64281diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
64282new file mode 100644
64283index 0000000..2131422
64284--- /dev/null
64285+++ b/grsecurity/grsec_mount.c
64286@@ -0,0 +1,62 @@
64287+#include <linux/kernel.h>
64288+#include <linux/sched.h>
64289+#include <linux/mount.h>
64290+#include <linux/grsecurity.h>
64291+#include <linux/grinternal.h>
64292+
64293+void
64294+gr_log_remount(const char *devname, const int retval)
64295+{
64296+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64297+ if (grsec_enable_mount && (retval >= 0))
64298+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
64299+#endif
64300+ return;
64301+}
64302+
64303+void
64304+gr_log_unmount(const char *devname, const int retval)
64305+{
64306+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64307+ if (grsec_enable_mount && (retval >= 0))
64308+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
64309+#endif
64310+ return;
64311+}
64312+
64313+void
64314+gr_log_mount(const char *from, const char *to, const int retval)
64315+{
64316+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64317+ if (grsec_enable_mount && (retval >= 0))
64318+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
64319+#endif
64320+ return;
64321+}
64322+
64323+int
64324+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
64325+{
64326+#ifdef CONFIG_GRKERNSEC_ROFS
64327+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
64328+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
64329+ return -EPERM;
64330+ } else
64331+ return 0;
64332+#endif
64333+ return 0;
64334+}
64335+
64336+int
64337+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
64338+{
64339+#ifdef CONFIG_GRKERNSEC_ROFS
64340+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
64341+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
64342+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
64343+ return -EPERM;
64344+ } else
64345+ return 0;
64346+#endif
64347+ return 0;
64348+}
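
The two ROFS handlers implement the romount_protect sysctl declared later in this patch: once enabled, any new mount that is not MNT_READONLY is refused, and so is any write open of a block special file, which closes the hole of modifying a read-only-mounted filesystem through its backing device. (With CONFIG_GRKERNSEC_ROFS set, the trailing return 0 after the #endif is unreachable; it only serves the config-off build.) A hedged call-site sketch, with names assumed rather than taken from this hunk:

    /* Assumed VFS open-path usage: */
    static int example_open_check(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
    {
        int err = gr_handle_rofs_blockwrite(dentry, mnt, acc_mode);
        if (err)
            return err;    /* -EPERM, already logged via gr_log_fs_generic() */
        /* ... continue with the normal open ... */
        return 0;
    }
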
64349diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
64350new file mode 100644
64351index 0000000..a3b12a0
64352--- /dev/null
64353+++ b/grsecurity/grsec_pax.c
64354@@ -0,0 +1,36 @@
64355+#include <linux/kernel.h>
64356+#include <linux/sched.h>
64357+#include <linux/mm.h>
64358+#include <linux/file.h>
64359+#include <linux/grinternal.h>
64360+#include <linux/grsecurity.h>
64361+
64362+void
64363+gr_log_textrel(struct vm_area_struct * vma)
64364+{
64365+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64366+ if (grsec_enable_audit_textrel)
64367+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
64368+#endif
64369+ return;
64370+}
64371+
64372+void
64373+gr_log_rwxmmap(struct file *file)
64374+{
64375+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64376+ if (grsec_enable_log_rwxmaps)
64377+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
64378+#endif
64379+ return;
64380+}
64381+
64382+void
64383+gr_log_rwxmprotect(struct file *file)
64384+{
64385+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64386+ if (grsec_enable_log_rwxmaps)
64387+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
64388+#endif
64389+ return;
64390+}
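
These three hooks feed the GR_TEXTREL and GR_RWXMAP cases of the dispatcher at the top of this section; both cases tolerate a NULL struct file and render it as "<anonymous mapping>", so callers can pass vma->vm_file straight through. Illustrative use, assuming the mmap/mprotect paths as the call sites:

    static void example_report_wx_mapping(struct vm_area_struct *vma)
    {
        gr_log_rwxmmap(vma->vm_file);    /* NULL is fine for anonymous maps */
    }
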
64391diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
64392new file mode 100644
64393index 0000000..f7f29aa
64394--- /dev/null
64395+++ b/grsecurity/grsec_ptrace.c
64396@@ -0,0 +1,30 @@
64397+#include <linux/kernel.h>
64398+#include <linux/sched.h>
64399+#include <linux/grinternal.h>
64400+#include <linux/security.h>
64401+
64402+void
64403+gr_audit_ptrace(struct task_struct *task)
64404+{
64405+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64406+ if (grsec_enable_audit_ptrace)
64407+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
64408+#endif
64409+ return;
64410+}
64411+
64412+int
64413+gr_ptrace_readexec(struct file *file, int unsafe_flags)
64414+{
64415+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64416+ const struct dentry *dentry = file->f_path.dentry;
64417+ const struct vfsmount *mnt = file->f_path.mnt;
64418+
64419+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
64420+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
64421+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
64422+ return -EACCES;
64423+ }
64424+#endif
64425+ return 0;
64426+}
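
gr_ptrace_readexec() closes an information leak: without it, a user who may execute but not read a binary could ptrace the exec and dump the image. The check refuses a traced exec when either DAC (inode_permission() returning nonzero for MAY_READ) or the RBAC open check denies read; gr_acl_handle_open() is assumed to return nonzero on allow, hence the negation. The same predicate, unrolled for clarity (illustrative only):

    static int example_readexec_denied(struct dentry *d, struct vfsmount *m, int unsafe_flags)
    {
        int dac_denied  = inode_permission(d->d_inode, MAY_READ) != 0;
        int rbac_denied = !gr_acl_handle_open(d, m, MAY_READ);    /* assumed: nonzero == allow */

        return grsec_enable_ptrace_readexec &&
               (unsafe_flags & LSM_UNSAFE_PTRACE) &&
               (dac_denied || rbac_denied);
    }
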
64427diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
64428new file mode 100644
64429index 0000000..e09715a
64430--- /dev/null
64431+++ b/grsecurity/grsec_sig.c
64432@@ -0,0 +1,222 @@
64433+#include <linux/kernel.h>
64434+#include <linux/sched.h>
64435+#include <linux/delay.h>
64436+#include <linux/grsecurity.h>
64437+#include <linux/grinternal.h>
64438+#include <linux/hardirq.h>
64439+
64440+char *signames[] = {
64441+ [SIGSEGV] = "Segmentation fault",
64442+ [SIGILL] = "Illegal instruction",
64443+ [SIGABRT] = "Abort",
64444+ [SIGBUS] = "Invalid alignment/Bus error"
64445+};
64446+
64447+void
64448+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
64449+{
64450+#ifdef CONFIG_GRKERNSEC_SIGNAL
64451+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
64452+ (sig == SIGABRT) || (sig == SIGBUS))) {
64453+ if (task_pid_nr(t) == task_pid_nr(current)) {
64454+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
64455+ } else {
64456+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
64457+ }
64458+ }
64459+#endif
64460+ return;
64461+}
64462+
64463+int
64464+gr_handle_signal(const struct task_struct *p, const int sig)
64465+{
64466+#ifdef CONFIG_GRKERNSEC
64467+ /* ignore the 0 signal for protected task checks */
64468+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
64469+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
64470+ return -EPERM;
64471+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
64472+ return -EPERM;
64473+ }
64474+#endif
64475+ return 0;
64476+}
64477+
64478+#ifdef CONFIG_GRKERNSEC
64479+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
64480+
64481+int gr_fake_force_sig(int sig, struct task_struct *t)
64482+{
64483+ unsigned long int flags;
64484+ int ret, blocked, ignored;
64485+ struct k_sigaction *action;
64486+
64487+ spin_lock_irqsave(&t->sighand->siglock, flags);
64488+ action = &t->sighand->action[sig-1];
64489+ ignored = action->sa.sa_handler == SIG_IGN;
64490+ blocked = sigismember(&t->blocked, sig);
64491+ if (blocked || ignored) {
64492+ action->sa.sa_handler = SIG_DFL;
64493+ if (blocked) {
64494+ sigdelset(&t->blocked, sig);
64495+ recalc_sigpending_and_wake(t);
64496+ }
64497+ }
64498+ if (action->sa.sa_handler == SIG_DFL)
64499+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
64500+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
64501+
64502+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
64503+
64504+ return ret;
64505+}
64506+#endif
64507+
64508+#ifdef CONFIG_GRKERNSEC_BRUTE
64509+#define GR_USER_BAN_TIME (15 * 60)
64510+#define GR_DAEMON_BRUTE_TIME (30 * 60)
64511+
64512+static int __get_dumpable(unsigned long mm_flags)
64513+{
64514+ int ret;
64515+
64516+ ret = mm_flags & MMF_DUMPABLE_MASK;
64517+ return (ret >= 2) ? 2 : ret;
64518+}
64519+#endif
64520+
64521+void gr_handle_brute_attach(unsigned long mm_flags)
64522+{
64523+#ifdef CONFIG_GRKERNSEC_BRUTE
64524+ struct task_struct *p = current;
64525+ kuid_t uid = GLOBAL_ROOT_UID;
64526+ int daemon = 0;
64527+
64528+ if (!grsec_enable_brute)
64529+ return;
64530+
64531+ rcu_read_lock();
64532+ read_lock(&tasklist_lock);
64533+ read_lock(&grsec_exec_file_lock);
64534+ if (p->real_parent && p->real_parent->exec_file == p->exec_file) {
64535+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
64536+ p->real_parent->brute = 1;
64537+ daemon = 1;
64538+ } else {
64539+ const struct cred *cred = __task_cred(p), *cred2;
64540+ struct task_struct *tsk, *tsk2;
64541+
64542+ if (!__get_dumpable(mm_flags) && gr_is_global_nonroot(cred->uid)) {
64543+ struct user_struct *user;
64544+
64545+ uid = cred->uid;
64546+
64547+			/* this find_user() reference is put upon re-execution past the ban expiration */
64548+ user = find_user(uid);
64549+ if (user == NULL)
64550+ goto unlock;
64551+ user->banned = 1;
64552+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
64553+ if (user->ban_expires == ~0UL)
64554+ user->ban_expires--;
64555+
64556+ do_each_thread(tsk2, tsk) {
64557+ cred2 = __task_cred(tsk);
64558+ if (tsk != p && uid_eq(cred2->uid, uid))
64559+ gr_fake_force_sig(SIGKILL, tsk);
64560+ } while_each_thread(tsk2, tsk);
64561+ }
64562+ }
64563+unlock:
64564+ read_unlock(&grsec_exec_file_lock);
64565+ read_unlock(&tasklist_lock);
64566+ rcu_read_unlock();
64567+
64568+ if (gr_is_global_nonroot(uid))
64569+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n",
64570+ GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
64571+ else if (daemon)
64572+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
64573+
64574+#endif
64575+ return;
64576+}
64577+
64578+void gr_handle_brute_check(void)
64579+{
64580+#ifdef CONFIG_GRKERNSEC_BRUTE
64581+ struct task_struct *p = current;
64582+
64583+ if (unlikely(p->brute)) {
64584+ if (!grsec_enable_brute)
64585+ p->brute = 0;
64586+ else if (time_before(get_seconds(), p->brute_expires))
64587+ msleep(30 * 1000);
64588+ }
64589+#endif
64590+ return;
64591+}
64592+
64593+void gr_handle_kernel_exploit(void)
64594+{
64595+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
64596+ const struct cred *cred;
64597+ struct task_struct *tsk, *tsk2;
64598+ struct user_struct *user;
64599+ kuid_t uid;
64600+
64601+ if (in_irq() || in_serving_softirq() || in_nmi())
64602+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
64603+
64604+ uid = current_uid();
64605+
64606+ if (gr_is_global_root(uid))
64607+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
64608+ else {
64609+ /* kill all the processes of this user, hold a reference
64610+ to their creds struct, and prevent them from creating
64611+ another process until system reset
64612+ */
64613+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
64614+ GR_GLOBAL_UID(uid));
64615+ /* we intentionally leak this ref */
64616+ user = get_uid(current->cred->user);
64617+ if (user) {
64618+ user->banned = 1;
64619+ user->ban_expires = ~0UL;
64620+ }
64621+
64622+ read_lock(&tasklist_lock);
64623+ do_each_thread(tsk2, tsk) {
64624+ cred = __task_cred(tsk);
64625+ if (uid_eq(cred->uid, uid))
64626+ gr_fake_force_sig(SIGKILL, tsk);
64627+ } while_each_thread(tsk2, tsk);
64628+ read_unlock(&tasklist_lock);
64629+ }
64630+#endif
64631+}
64632+
64633+int __gr_process_user_ban(struct user_struct *user)
64634+{
64635+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64636+ if (unlikely(user->banned)) {
64637+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
64638+ user->banned = 0;
64639+ user->ban_expires = 0;
64640+ free_uid(user);
64641+ } else
64642+ return -EPERM;
64643+ }
64644+#endif
64645+ return 0;
64646+}
64647+
64648+int gr_process_user_ban(void)
64649+{
64650+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64651+ return __gr_process_user_ban(current->cred->user);
64652+#endif
64653+ return 0;
64654+}
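
The ban bookkeeping runs on get_seconds() timestamps: a brute-forced uid is banned for GR_USER_BAN_TIME (15 minutes), while a crashing daemon keeps its parent's brute flag for GR_DAEMON_BRUTE_TIME (30 minutes), during which gr_handle_brute_check() delays every re-execution by 30 seconds. ban_expires == ~0UL is reserved to mean a permanent ban, which is why gr_handle_brute_attach() decrements a computed expiry that happens to collide with it and why the kernel-exploit lockout sets it directly; __gr_process_user_ban() only clears bans with a finite expiry. The caller of gr_process_user_ban() is not in this hunk; the assumed usage in the process-creation path is:

    static int example_fork_gate(void)
    {
        /* assumed call site in the process-creation path */
        if (gr_process_user_ban())
            return -EPERM;    /* a banned uid may not create new processes */
        return 0;
    }
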
64655diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
64656new file mode 100644
64657index 0000000..4030d57
64658--- /dev/null
64659+++ b/grsecurity/grsec_sock.c
64660@@ -0,0 +1,244 @@
64661+#include <linux/kernel.h>
64662+#include <linux/module.h>
64663+#include <linux/sched.h>
64664+#include <linux/file.h>
64665+#include <linux/net.h>
64666+#include <linux/in.h>
64667+#include <linux/ip.h>
64668+#include <net/sock.h>
64669+#include <net/inet_sock.h>
64670+#include <linux/grsecurity.h>
64671+#include <linux/grinternal.h>
64672+#include <linux/gracl.h>
64673+
64674+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
64675+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
64676+
64677+EXPORT_SYMBOL(gr_search_udp_recvmsg);
64678+EXPORT_SYMBOL(gr_search_udp_sendmsg);
64679+
64680+#ifdef CONFIG_UNIX_MODULE
64681+EXPORT_SYMBOL(gr_acl_handle_unix);
64682+EXPORT_SYMBOL(gr_acl_handle_mknod);
64683+EXPORT_SYMBOL(gr_handle_chroot_unix);
64684+EXPORT_SYMBOL(gr_handle_create);
64685+#endif
64686+
64687+#ifdef CONFIG_GRKERNSEC
64688+#define gr_conn_table_size 32749
64689+struct conn_table_entry {
64690+ struct conn_table_entry *next;
64691+ struct signal_struct *sig;
64692+};
64693+
64694+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
64695+DEFINE_SPINLOCK(gr_conn_table_lock);
64696+
64697+extern const char * gr_socktype_to_name(unsigned char type);
64698+extern const char * gr_proto_to_name(unsigned char proto);
64699+extern const char * gr_sockfamily_to_name(unsigned char family);
64700+
64701+static __inline__ int
64702+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
64703+{
64704+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
64705+}
64706+
64707+static __inline__ int
64708+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
64709+ __u16 sport, __u16 dport)
64710+{
64711+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
64712+ sig->gr_sport == sport && sig->gr_dport == dport))
64713+ return 1;
64714+ else
64715+ return 0;
64716+}
64717+
64718+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
64719+{
64720+ struct conn_table_entry **match;
64721+ unsigned int index;
64722+
64723+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64724+ sig->gr_sport, sig->gr_dport,
64725+ gr_conn_table_size);
64726+
64727+ newent->sig = sig;
64728+
64729+ match = &gr_conn_table[index];
64730+ newent->next = *match;
64731+ *match = newent;
64732+
64733+ return;
64734+}
64735+
64736+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
64737+{
64738+ struct conn_table_entry *match, *last = NULL;
64739+ unsigned int index;
64740+
64741+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64742+ sig->gr_sport, sig->gr_dport,
64743+ gr_conn_table_size);
64744+
64745+ match = gr_conn_table[index];
64746+ while (match && !conn_match(match->sig,
64747+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
64748+ sig->gr_dport)) {
64749+ last = match;
64750+ match = match->next;
64751+ }
64752+
64753+ if (match) {
64754+ if (last)
64755+ last->next = match->next;
64756+ else
64757+			gr_conn_table[index] = match->next;	/* keep any chained entries */
64758+ kfree(match);
64759+ }
64760+
64761+ return;
64762+}
64763+
64764+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
64765+ __u16 sport, __u16 dport)
64766+{
64767+ struct conn_table_entry *match;
64768+ unsigned int index;
64769+
64770+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
64771+
64772+ match = gr_conn_table[index];
64773+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
64774+ match = match->next;
64775+
64776+ if (match)
64777+ return match->sig;
64778+ else
64779+ return NULL;
64780+}
64781+
64782+#endif
64783+
64784+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
64785+{
64786+#ifdef CONFIG_GRKERNSEC
64787+ struct signal_struct *sig = task->signal;
64788+ struct conn_table_entry *newent;
64789+
64790+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
64791+ if (newent == NULL)
64792+ return;
64793+ /* no bh lock needed since we are called with bh disabled */
64794+ spin_lock(&gr_conn_table_lock);
64795+ gr_del_task_from_ip_table_nolock(sig);
64796+ sig->gr_saddr = inet->inet_rcv_saddr;
64797+ sig->gr_daddr = inet->inet_daddr;
64798+ sig->gr_sport = inet->inet_sport;
64799+ sig->gr_dport = inet->inet_dport;
64800+ gr_add_to_task_ip_table_nolock(sig, newent);
64801+ spin_unlock(&gr_conn_table_lock);
64802+#endif
64803+ return;
64804+}
64805+
64806+void gr_del_task_from_ip_table(struct task_struct *task)
64807+{
64808+#ifdef CONFIG_GRKERNSEC
64809+ spin_lock_bh(&gr_conn_table_lock);
64810+ gr_del_task_from_ip_table_nolock(task->signal);
64811+ spin_unlock_bh(&gr_conn_table_lock);
64812+#endif
64813+ return;
64814+}
64815+
64816+void
64817+gr_attach_curr_ip(const struct sock *sk)
64818+{
64819+#ifdef CONFIG_GRKERNSEC
64820+ struct signal_struct *p, *set;
64821+ const struct inet_sock *inet = inet_sk(sk);
64822+
64823+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
64824+ return;
64825+
64826+ set = current->signal;
64827+
64828+ spin_lock_bh(&gr_conn_table_lock);
64829+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
64830+ inet->inet_dport, inet->inet_sport);
64831+ if (unlikely(p != NULL)) {
64832+ set->curr_ip = p->curr_ip;
64833+ set->used_accept = 1;
64834+ gr_del_task_from_ip_table_nolock(p);
64835+ spin_unlock_bh(&gr_conn_table_lock);
64836+ return;
64837+ }
64838+ spin_unlock_bh(&gr_conn_table_lock);
64839+
64840+ set->curr_ip = inet->inet_daddr;
64841+ set->used_accept = 1;
64842+#endif
64843+ return;
64844+}
64845+
64846+int
64847+gr_handle_sock_all(const int family, const int type, const int protocol)
64848+{
64849+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64850+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
64851+ (family != AF_UNIX)) {
64852+ if (family == AF_INET)
64853+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
64854+ else
64855+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
64856+ return -EACCES;
64857+ }
64858+#endif
64859+ return 0;
64860+}
64861+
64862+int
64863+gr_handle_sock_server(const struct sockaddr *sck)
64864+{
64865+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64866+ if (grsec_enable_socket_server &&
64867+ in_group_p(grsec_socket_server_gid) &&
64868+ sck && (sck->sa_family != AF_UNIX) &&
64869+ (sck->sa_family != AF_LOCAL)) {
64870+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64871+ return -EACCES;
64872+ }
64873+#endif
64874+ return 0;
64875+}
64876+
64877+int
64878+gr_handle_sock_server_other(const struct sock *sck)
64879+{
64880+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64881+ if (grsec_enable_socket_server &&
64882+ in_group_p(grsec_socket_server_gid) &&
64883+ sck && (sck->sk_family != AF_UNIX) &&
64884+ (sck->sk_family != AF_LOCAL)) {
64885+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64886+ return -EACCES;
64887+ }
64888+#endif
64889+ return 0;
64890+}
64891+
64892+int
64893+gr_handle_sock_client(const struct sockaddr *sck)
64894+{
64895+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64896+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
64897+ sck && (sck->sa_family != AF_UNIX) &&
64898+ (sck->sa_family != AF_LOCAL)) {
64899+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
64900+ return -EACCES;
64901+ }
64902+#endif
64903+ return 0;
64904+}
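
The connection table lets grsecurity attribute log messages to the peer IP of an accepted TCP connection: gr_update_task_in_ip_table() records the 4-tuple against the listener's signal_struct, and gr_attach_curr_ip() looks up the reversed tuple from the accepting socket and copies curr_ip across. Hashing is a simple fold of the tuple reduced modulo the prime table size, with collisions chained through conn_table_entry->next (sport/dport come from inet_sport/inet_dport and so are in network byte order; the hash does not care). An illustrative bucket computation:

    static unsigned int example_bucket(void)
    {
        __u32 saddr = 0x0100007f, daddr = 0x0100007f;    /* 127.0.0.1 twice, illustrative */
        __u16 sport = 1024, dport = 80;

        /* same fold as conn_hash() above */
        return (daddr + saddr + ((__u32)sport << 8) + ((__u32)dport << 16)) % gr_conn_table_size;
    }
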
64905diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
64906new file mode 100644
64907index 0000000..f55ef0f
64908--- /dev/null
64909+++ b/grsecurity/grsec_sysctl.c
64910@@ -0,0 +1,469 @@
64911+#include <linux/kernel.h>
64912+#include <linux/sched.h>
64913+#include <linux/sysctl.h>
64914+#include <linux/grsecurity.h>
64915+#include <linux/grinternal.h>
64916+
64917+int
64918+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
64919+{
64920+#ifdef CONFIG_GRKERNSEC_SYSCTL
64921+ if (dirname == NULL || name == NULL)
64922+ return 0;
64923+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
64924+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
64925+ return -EACCES;
64926+ }
64927+#endif
64928+ return 0;
64929+}
64930+
64931+#ifdef CONFIG_GRKERNSEC_ROFS
64932+static int __maybe_unused one = 1;
64933+#endif
64934+
64935+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
64936+struct ctl_table grsecurity_table[] = {
64937+#ifdef CONFIG_GRKERNSEC_SYSCTL
64938+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
64939+#ifdef CONFIG_GRKERNSEC_IO
64940+ {
64941+ .procname = "disable_priv_io",
64942+ .data = &grsec_disable_privio,
64943+ .maxlen = sizeof(int),
64944+ .mode = 0600,
64945+ .proc_handler = &proc_dointvec,
64946+ },
64947+#endif
64948+#endif
64949+#ifdef CONFIG_GRKERNSEC_LINK
64950+ {
64951+ .procname = "linking_restrictions",
64952+ .data = &grsec_enable_link,
64953+ .maxlen = sizeof(int),
64954+ .mode = 0600,
64955+ .proc_handler = &proc_dointvec,
64956+ },
64957+#endif
64958+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
64959+ {
64960+ .procname = "enforce_symlinksifowner",
64961+ .data = &grsec_enable_symlinkown,
64962+ .maxlen = sizeof(int),
64963+ .mode = 0600,
64964+ .proc_handler = &proc_dointvec,
64965+ },
64966+ {
64967+ .procname = "symlinkown_gid",
64968+ .data = &grsec_symlinkown_gid,
64969+ .maxlen = sizeof(int),
64970+ .mode = 0600,
64971+ .proc_handler = &proc_dointvec,
64972+ },
64973+#endif
64974+#ifdef CONFIG_GRKERNSEC_BRUTE
64975+ {
64976+ .procname = "deter_bruteforce",
64977+ .data = &grsec_enable_brute,
64978+ .maxlen = sizeof(int),
64979+ .mode = 0600,
64980+ .proc_handler = &proc_dointvec,
64981+ },
64982+#endif
64983+#ifdef CONFIG_GRKERNSEC_FIFO
64984+ {
64985+ .procname = "fifo_restrictions",
64986+ .data = &grsec_enable_fifo,
64987+ .maxlen = sizeof(int),
64988+ .mode = 0600,
64989+ .proc_handler = &proc_dointvec,
64990+ },
64991+#endif
64992+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64993+ {
64994+ .procname = "ptrace_readexec",
64995+ .data = &grsec_enable_ptrace_readexec,
64996+ .maxlen = sizeof(int),
64997+ .mode = 0600,
64998+ .proc_handler = &proc_dointvec,
64999+ },
65000+#endif
65001+#ifdef CONFIG_GRKERNSEC_SETXID
65002+ {
65003+ .procname = "consistent_setxid",
65004+ .data = &grsec_enable_setxid,
65005+ .maxlen = sizeof(int),
65006+ .mode = 0600,
65007+ .proc_handler = &proc_dointvec,
65008+ },
65009+#endif
65010+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65011+ {
65012+ .procname = "ip_blackhole",
65013+ .data = &grsec_enable_blackhole,
65014+ .maxlen = sizeof(int),
65015+ .mode = 0600,
65016+ .proc_handler = &proc_dointvec,
65017+ },
65018+ {
65019+ .procname = "lastack_retries",
65020+ .data = &grsec_lastack_retries,
65021+ .maxlen = sizeof(int),
65022+ .mode = 0600,
65023+ .proc_handler = &proc_dointvec,
65024+ },
65025+#endif
65026+#ifdef CONFIG_GRKERNSEC_EXECLOG
65027+ {
65028+ .procname = "exec_logging",
65029+ .data = &grsec_enable_execlog,
65030+ .maxlen = sizeof(int),
65031+ .mode = 0600,
65032+ .proc_handler = &proc_dointvec,
65033+ },
65034+#endif
65035+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65036+ {
65037+ .procname = "rwxmap_logging",
65038+ .data = &grsec_enable_log_rwxmaps,
65039+ .maxlen = sizeof(int),
65040+ .mode = 0600,
65041+ .proc_handler = &proc_dointvec,
65042+ },
65043+#endif
65044+#ifdef CONFIG_GRKERNSEC_SIGNAL
65045+ {
65046+ .procname = "signal_logging",
65047+ .data = &grsec_enable_signal,
65048+ .maxlen = sizeof(int),
65049+ .mode = 0600,
65050+ .proc_handler = &proc_dointvec,
65051+ },
65052+#endif
65053+#ifdef CONFIG_GRKERNSEC_FORKFAIL
65054+ {
65055+ .procname = "forkfail_logging",
65056+ .data = &grsec_enable_forkfail,
65057+ .maxlen = sizeof(int),
65058+ .mode = 0600,
65059+ .proc_handler = &proc_dointvec,
65060+ },
65061+#endif
65062+#ifdef CONFIG_GRKERNSEC_TIME
65063+ {
65064+ .procname = "timechange_logging",
65065+ .data = &grsec_enable_time,
65066+ .maxlen = sizeof(int),
65067+ .mode = 0600,
65068+ .proc_handler = &proc_dointvec,
65069+ },
65070+#endif
65071+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
65072+ {
65073+ .procname = "chroot_deny_shmat",
65074+ .data = &grsec_enable_chroot_shmat,
65075+ .maxlen = sizeof(int),
65076+ .mode = 0600,
65077+ .proc_handler = &proc_dointvec,
65078+ },
65079+#endif
65080+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
65081+ {
65082+ .procname = "chroot_deny_unix",
65083+ .data = &grsec_enable_chroot_unix,
65084+ .maxlen = sizeof(int),
65085+ .mode = 0600,
65086+ .proc_handler = &proc_dointvec,
65087+ },
65088+#endif
65089+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
65090+ {
65091+ .procname = "chroot_deny_mount",
65092+ .data = &grsec_enable_chroot_mount,
65093+ .maxlen = sizeof(int),
65094+ .mode = 0600,
65095+ .proc_handler = &proc_dointvec,
65096+ },
65097+#endif
65098+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
65099+ {
65100+ .procname = "chroot_deny_fchdir",
65101+ .data = &grsec_enable_chroot_fchdir,
65102+ .maxlen = sizeof(int),
65103+ .mode = 0600,
65104+ .proc_handler = &proc_dointvec,
65105+ },
65106+#endif
65107+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
65108+ {
65109+ .procname = "chroot_deny_chroot",
65110+ .data = &grsec_enable_chroot_double,
65111+ .maxlen = sizeof(int),
65112+ .mode = 0600,
65113+ .proc_handler = &proc_dointvec,
65114+ },
65115+#endif
65116+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
65117+ {
65118+ .procname = "chroot_deny_pivot",
65119+ .data = &grsec_enable_chroot_pivot,
65120+ .maxlen = sizeof(int),
65121+ .mode = 0600,
65122+ .proc_handler = &proc_dointvec,
65123+ },
65124+#endif
65125+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
65126+ {
65127+ .procname = "chroot_enforce_chdir",
65128+ .data = &grsec_enable_chroot_chdir,
65129+ .maxlen = sizeof(int),
65130+ .mode = 0600,
65131+ .proc_handler = &proc_dointvec,
65132+ },
65133+#endif
65134+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
65135+ {
65136+ .procname = "chroot_deny_chmod",
65137+ .data = &grsec_enable_chroot_chmod,
65138+ .maxlen = sizeof(int),
65139+ .mode = 0600,
65140+ .proc_handler = &proc_dointvec,
65141+ },
65142+#endif
65143+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
65144+ {
65145+ .procname = "chroot_deny_mknod",
65146+ .data = &grsec_enable_chroot_mknod,
65147+ .maxlen = sizeof(int),
65148+ .mode = 0600,
65149+ .proc_handler = &proc_dointvec,
65150+ },
65151+#endif
65152+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
65153+ {
65154+ .procname = "chroot_restrict_nice",
65155+ .data = &grsec_enable_chroot_nice,
65156+ .maxlen = sizeof(int),
65157+ .mode = 0600,
65158+ .proc_handler = &proc_dointvec,
65159+ },
65160+#endif
65161+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
65162+ {
65163+ .procname = "chroot_execlog",
65164+ .data = &grsec_enable_chroot_execlog,
65165+ .maxlen = sizeof(int),
65166+ .mode = 0600,
65167+ .proc_handler = &proc_dointvec,
65168+ },
65169+#endif
65170+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
65171+ {
65172+ .procname = "chroot_caps",
65173+ .data = &grsec_enable_chroot_caps,
65174+ .maxlen = sizeof(int),
65175+ .mode = 0600,
65176+ .proc_handler = &proc_dointvec,
65177+ },
65178+#endif
65179+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
65180+ {
65181+ .procname = "chroot_deny_sysctl",
65182+ .data = &grsec_enable_chroot_sysctl,
65183+ .maxlen = sizeof(int),
65184+ .mode = 0600,
65185+ .proc_handler = &proc_dointvec,
65186+ },
65187+#endif
65188+#ifdef CONFIG_GRKERNSEC_TPE
65189+ {
65190+ .procname = "tpe",
65191+ .data = &grsec_enable_tpe,
65192+ .maxlen = sizeof(int),
65193+ .mode = 0600,
65194+ .proc_handler = &proc_dointvec,
65195+ },
65196+ {
65197+ .procname = "tpe_gid",
65198+ .data = &grsec_tpe_gid,
65199+ .maxlen = sizeof(int),
65200+ .mode = 0600,
65201+ .proc_handler = &proc_dointvec,
65202+ },
65203+#endif
65204+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65205+ {
65206+ .procname = "tpe_invert",
65207+ .data = &grsec_enable_tpe_invert,
65208+ .maxlen = sizeof(int),
65209+ .mode = 0600,
65210+ .proc_handler = &proc_dointvec,
65211+ },
65212+#endif
65213+#ifdef CONFIG_GRKERNSEC_TPE_ALL
65214+ {
65215+ .procname = "tpe_restrict_all",
65216+ .data = &grsec_enable_tpe_all,
65217+ .maxlen = sizeof(int),
65218+ .mode = 0600,
65219+ .proc_handler = &proc_dointvec,
65220+ },
65221+#endif
65222+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65223+ {
65224+ .procname = "socket_all",
65225+ .data = &grsec_enable_socket_all,
65226+ .maxlen = sizeof(int),
65227+ .mode = 0600,
65228+ .proc_handler = &proc_dointvec,
65229+ },
65230+ {
65231+ .procname = "socket_all_gid",
65232+ .data = &grsec_socket_all_gid,
65233+ .maxlen = sizeof(int),
65234+ .mode = 0600,
65235+ .proc_handler = &proc_dointvec,
65236+ },
65237+#endif
65238+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65239+ {
65240+ .procname = "socket_client",
65241+ .data = &grsec_enable_socket_client,
65242+ .maxlen = sizeof(int),
65243+ .mode = 0600,
65244+ .proc_handler = &proc_dointvec,
65245+ },
65246+ {
65247+ .procname = "socket_client_gid",
65248+ .data = &grsec_socket_client_gid,
65249+ .maxlen = sizeof(int),
65250+ .mode = 0600,
65251+ .proc_handler = &proc_dointvec,
65252+ },
65253+#endif
65254+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65255+ {
65256+ .procname = "socket_server",
65257+ .data = &grsec_enable_socket_server,
65258+ .maxlen = sizeof(int),
65259+ .mode = 0600,
65260+ .proc_handler = &proc_dointvec,
65261+ },
65262+ {
65263+ .procname = "socket_server_gid",
65264+ .data = &grsec_socket_server_gid,
65265+ .maxlen = sizeof(int),
65266+ .mode = 0600,
65267+ .proc_handler = &proc_dointvec,
65268+ },
65269+#endif
65270+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
65271+ {
65272+ .procname = "audit_group",
65273+ .data = &grsec_enable_group,
65274+ .maxlen = sizeof(int),
65275+ .mode = 0600,
65276+ .proc_handler = &proc_dointvec,
65277+ },
65278+ {
65279+ .procname = "audit_gid",
65280+ .data = &grsec_audit_gid,
65281+ .maxlen = sizeof(int),
65282+ .mode = 0600,
65283+ .proc_handler = &proc_dointvec,
65284+ },
65285+#endif
65286+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
65287+ {
65288+ .procname = "audit_chdir",
65289+ .data = &grsec_enable_chdir,
65290+ .maxlen = sizeof(int),
65291+ .mode = 0600,
65292+ .proc_handler = &proc_dointvec,
65293+ },
65294+#endif
65295+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65296+ {
65297+ .procname = "audit_mount",
65298+ .data = &grsec_enable_mount,
65299+ .maxlen = sizeof(int),
65300+ .mode = 0600,
65301+ .proc_handler = &proc_dointvec,
65302+ },
65303+#endif
65304+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65305+ {
65306+ .procname = "audit_textrel",
65307+ .data = &grsec_enable_audit_textrel,
65308+ .maxlen = sizeof(int),
65309+ .mode = 0600,
65310+ .proc_handler = &proc_dointvec,
65311+ },
65312+#endif
65313+#ifdef CONFIG_GRKERNSEC_DMESG
65314+ {
65315+ .procname = "dmesg",
65316+ .data = &grsec_enable_dmesg,
65317+ .maxlen = sizeof(int),
65318+ .mode = 0600,
65319+ .proc_handler = &proc_dointvec,
65320+ },
65321+#endif
65322+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65323+ {
65324+ .procname = "chroot_findtask",
65325+ .data = &grsec_enable_chroot_findtask,
65326+ .maxlen = sizeof(int),
65327+ .mode = 0600,
65328+ .proc_handler = &proc_dointvec,
65329+ },
65330+#endif
65331+#ifdef CONFIG_GRKERNSEC_RESLOG
65332+ {
65333+ .procname = "resource_logging",
65334+ .data = &grsec_resource_logging,
65335+ .maxlen = sizeof(int),
65336+ .mode = 0600,
65337+ .proc_handler = &proc_dointvec,
65338+ },
65339+#endif
65340+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65341+ {
65342+ .procname = "audit_ptrace",
65343+ .data = &grsec_enable_audit_ptrace,
65344+ .maxlen = sizeof(int),
65345+ .mode = 0600,
65346+ .proc_handler = &proc_dointvec,
65347+ },
65348+#endif
65349+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65350+ {
65351+ .procname = "harden_ptrace",
65352+ .data = &grsec_enable_harden_ptrace,
65353+ .maxlen = sizeof(int),
65354+ .mode = 0600,
65355+ .proc_handler = &proc_dointvec,
65356+ },
65357+#endif
65358+ {
65359+ .procname = "grsec_lock",
65360+ .data = &grsec_lock,
65361+ .maxlen = sizeof(int),
65362+ .mode = 0600,
65363+ .proc_handler = &proc_dointvec,
65364+ },
65365+#endif
65366+#ifdef CONFIG_GRKERNSEC_ROFS
65367+ {
65368+ .procname = "romount_protect",
65369+ .data = &grsec_enable_rofs,
65370+ .maxlen = sizeof(int),
65371+ .mode = 0600,
65372+ .proc_handler = &proc_dointvec_minmax,
65373+ .extra1 = &one,
65374+ .extra2 = &one,
65375+ },
65376+#endif
65377+ { }
65378+};
65379+#endif
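
Every tunable in grsecurity_table is a plain int, mode 0600, handled by proc_dointvec, so each CONFIG_GRKERNSEC_* option gains a matching runtime switch. Two entries deserve a note: grsec_lock is a one-way latch, since once it is written nonzero, gr_handle_sysctl_mod() at the top of this file returns -EACCES for every further write under the grsecurity directory, including to grsec_lock itself; and romount_protect uses proc_dointvec_minmax with extra1 == extra2 == &one, so 1 is the only accepted value and ROFS can be switched on at runtime but never back off. Assumed admin usage (the exact sysctl path depends on where this table is registered, which is not shown in this hunk):

    /*
     *   # sysctl -w kernel.grsecurity.grsec_lock=1
     *
     * After this, even root can no longer toggle the other
     * kernel.grsecurity.* switches until reboot.
     */
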
65380diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
65381new file mode 100644
65382index 0000000..0dc13c3
65383--- /dev/null
65384+++ b/grsecurity/grsec_time.c
65385@@ -0,0 +1,16 @@
65386+#include <linux/kernel.h>
65387+#include <linux/sched.h>
65388+#include <linux/grinternal.h>
65389+#include <linux/module.h>
65390+
65391+void
65392+gr_log_timechange(void)
65393+{
65394+#ifdef CONFIG_GRKERNSEC_TIME
65395+ if (grsec_enable_time)
65396+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
65397+#endif
65398+ return;
65399+}
65400+
65401+EXPORT_SYMBOL(gr_log_timechange);
65402diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
65403new file mode 100644
65404index 0000000..ee57dcf
65405--- /dev/null
65406+++ b/grsecurity/grsec_tpe.c
65407@@ -0,0 +1,73 @@
65408+#include <linux/kernel.h>
65409+#include <linux/sched.h>
65410+#include <linux/file.h>
65411+#include <linux/fs.h>
65412+#include <linux/grinternal.h>
65413+
65414+extern int gr_acl_tpe_check(void);
65415+
65416+int
65417+gr_tpe_allow(const struct file *file)
65418+{
65419+#ifdef CONFIG_GRKERNSEC
65420+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
65421+ const struct cred *cred = current_cred();
65422+ char *msg = NULL;
65423+ char *msg2 = NULL;
65424+
65425+ // never restrict root
65426+ if (gr_is_global_root(cred->uid))
65427+ return 1;
65428+
65429+ if (grsec_enable_tpe) {
65430+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65431+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
65432+ msg = "not being in trusted group";
65433+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
65434+ msg = "being in untrusted group";
65435+#else
65436+ if (in_group_p(grsec_tpe_gid))
65437+ msg = "being in untrusted group";
65438+#endif
65439+ }
65440+ if (!msg && gr_acl_tpe_check())
65441+ msg = "being in untrusted role";
65442+
65443+ // not in any affected group/role
65444+ if (!msg)
65445+ goto next_check;
65446+
65447+ if (gr_is_global_nonroot(inode->i_uid))
65448+ msg2 = "file in non-root-owned directory";
65449+ else if (inode->i_mode & S_IWOTH)
65450+ msg2 = "file in world-writable directory";
65451+ else if (inode->i_mode & S_IWGRP)
65452+ msg2 = "file in group-writable directory";
65453+
65454+ if (msg && msg2) {
65455+ char fullmsg[70] = {0};
65456+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
65457+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
65458+ return 0;
65459+ }
65460+ msg = NULL;
65461+next_check:
65462+#ifdef CONFIG_GRKERNSEC_TPE_ALL
65463+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
65464+ return 1;
65465+
65466+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
65467+ msg = "directory not owned by user";
65468+ else if (inode->i_mode & S_IWOTH)
65469+ msg = "file in world-writable directory";
65470+ else if (inode->i_mode & S_IWGRP)
65471+ msg = "file in group-writable directory";
65472+
65473+ if (msg) {
65474+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
65475+ return 0;
65476+ }
65477+#endif
65478+#endif
65479+ return 1;
65480+}
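
gr_tpe_allow() runs two passes over the parent directory of the file being executed: the first applies to users in (or, with TPE_INVERT, outside) the TPE group and to RBAC-untrusted roles; the second, under TPE_ALL, applies to everyone and additionally requires the directory to be owned by the executing user. Both passes reduce to the same directory-trust test (illustrative refactor, not part of the patch):

    static int example_dir_untrusted(const struct inode *dir)
    {
        return gr_is_global_nonroot(dir->i_uid) ||     /* not root-owned */
               (dir->i_mode & (S_IWGRP | S_IWOTH));    /* group/world-writable */
    }
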
65481diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
65482new file mode 100644
65483index 0000000..9f7b1ac
65484--- /dev/null
65485+++ b/grsecurity/grsum.c
65486@@ -0,0 +1,61 @@
65487+#include <linux/err.h>
65488+#include <linux/kernel.h>
65489+#include <linux/sched.h>
65490+#include <linux/mm.h>
65491+#include <linux/scatterlist.h>
65492+#include <linux/crypto.h>
65493+#include <linux/gracl.h>
65494+
65495+
65496+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
65497+#error "crypto and sha256 must be built into the kernel"
65498+#endif
65499+
65500+int
65501+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
65502+{
65503+ char *p;
65504+ struct crypto_hash *tfm;
65505+ struct hash_desc desc;
65506+ struct scatterlist sg;
65507+ unsigned char temp_sum[GR_SHA_LEN];
65508+ volatile int retval = 0;
65509+ volatile int dummy = 0;
65510+ unsigned int i;
65511+
65512+ sg_init_table(&sg, 1);
65513+
65514+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
65515+ if (IS_ERR(tfm)) {
65516+ /* should never happen, since sha256 should be built in */
65517+ return 1;
65518+ }
65519+
65520+ desc.tfm = tfm;
65521+ desc.flags = 0;
65522+
65523+ crypto_hash_init(&desc);
65524+
65525+ p = salt;
65526+ sg_set_buf(&sg, p, GR_SALT_LEN);
65527+ crypto_hash_update(&desc, &sg, sg.length);
65528+
65529+ p = entry->pw;
65530+ sg_set_buf(&sg, p, strlen(p));
65531+
65532+ crypto_hash_update(&desc, &sg, sg.length);
65533+
65534+ crypto_hash_final(&desc, temp_sum);
65535+
65536+ memset(entry->pw, 0, GR_PW_LEN);
65537+
65538+ for (i = 0; i < GR_SHA_LEN; i++)
65539+ if (sum[i] != temp_sum[i])
65540+ retval = 1;
65541+ else
65542+		dummy = 1; // balance the branch so the compare stays constant-time
65543+
65544+ crypto_free_hash(tfm);
65545+
65546+ return retval;
65547+}
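
chkpw() hashes salt+password with the synchronous sha256 API and compares the result against the stored sum. The comparison deliberately walks all GR_SHA_LEN bytes with no early exit, and the volatile retval/dummy pair stops the compiler from collapsing the loop into a short-circuiting memcmp(), so response time does not reveal the index of the first mismatching byte. The same intent in the more conventional constant-time form (illustrative only):

    static int example_ct_memcmp(const unsigned char *a, const unsigned char *b, unsigned int n)
    {
        unsigned char diff = 0;
        unsigned int i;

        for (i = 0; i < n; i++)
            diff |= a[i] ^ b[i];
        return diff != 0;    /* 1 on mismatch, matching chkpw()'s convention */
    }
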
65548diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
65549index 77ff547..181834f 100644
65550--- a/include/asm-generic/4level-fixup.h
65551+++ b/include/asm-generic/4level-fixup.h
65552@@ -13,8 +13,10 @@
65553 #define pmd_alloc(mm, pud, address) \
65554 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
65555 NULL: pmd_offset(pud, address))
65556+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
65557
65558 #define pud_alloc(mm, pgd, address) (pgd)
65559+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
65560 #define pud_offset(pgd, start) (pgd)
65561 #define pud_none(pud) 0
65562 #define pud_bad(pud) 0
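
The pmd_alloc_kernel()/pud_alloc_kernel() aliases give PaX a distinct allocator name for kernel page tables; on architectures using the 4level fixup they simply forward to the user-space variants, so generic PaX code can call the _kernel form unconditionally. Sketch of the intended call shape (assumed, not from this hunk):

    static pmd_t *example_alloc(pud_t *pud, unsigned long address)
    {
        /* equivalent to pmd_alloc() on 4level-fixup architectures */
        return pmd_alloc_kernel(&init_mm, pud, address);
    }
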
65563diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
65564index b7babf0..04ad282 100644
65565--- a/include/asm-generic/atomic-long.h
65566+++ b/include/asm-generic/atomic-long.h
65567@@ -22,6 +22,12 @@
65568
65569 typedef atomic64_t atomic_long_t;
65570
65571+#ifdef CONFIG_PAX_REFCOUNT
65572+typedef atomic64_unchecked_t atomic_long_unchecked_t;
65573+#else
65574+typedef atomic64_t atomic_long_unchecked_t;
65575+#endif
65576+
65577 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
65578
65579 static inline long atomic_long_read(atomic_long_t *l)
65580@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65581 return (long)atomic64_read(v);
65582 }
65583
65584+#ifdef CONFIG_PAX_REFCOUNT
65585+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65586+{
65587+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65588+
65589+ return (long)atomic64_read_unchecked(v);
65590+}
65591+#endif
65592+
65593 static inline void atomic_long_set(atomic_long_t *l, long i)
65594 {
65595 atomic64_t *v = (atomic64_t *)l;
65596@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65597 atomic64_set(v, i);
65598 }
65599
65600+#ifdef CONFIG_PAX_REFCOUNT
65601+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65602+{
65603+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65604+
65605+ atomic64_set_unchecked(v, i);
65606+}
65607+#endif
65608+
65609 static inline void atomic_long_inc(atomic_long_t *l)
65610 {
65611 atomic64_t *v = (atomic64_t *)l;
65612@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65613 atomic64_inc(v);
65614 }
65615
65616+#ifdef CONFIG_PAX_REFCOUNT
65617+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65618+{
65619+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65620+
65621+ atomic64_inc_unchecked(v);
65622+}
65623+#endif
65624+
65625 static inline void atomic_long_dec(atomic_long_t *l)
65626 {
65627 atomic64_t *v = (atomic64_t *)l;
65628@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65629 atomic64_dec(v);
65630 }
65631
65632+#ifdef CONFIG_PAX_REFCOUNT
65633+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65634+{
65635+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65636+
65637+ atomic64_dec_unchecked(v);
65638+}
65639+#endif
65640+
65641 static inline void atomic_long_add(long i, atomic_long_t *l)
65642 {
65643 atomic64_t *v = (atomic64_t *)l;
65644@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65645 atomic64_add(i, v);
65646 }
65647
65648+#ifdef CONFIG_PAX_REFCOUNT
65649+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65650+{
65651+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65652+
65653+ atomic64_add_unchecked(i, v);
65654+}
65655+#endif
65656+
65657 static inline void atomic_long_sub(long i, atomic_long_t *l)
65658 {
65659 atomic64_t *v = (atomic64_t *)l;
65660@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
65661 atomic64_sub(i, v);
65662 }
65663
65664+#ifdef CONFIG_PAX_REFCOUNT
65665+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
65666+{
65667+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65668+
65669+ atomic64_sub_unchecked(i, v);
65670+}
65671+#endif
65672+
65673 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
65674 {
65675 atomic64_t *v = (atomic64_t *)l;
65676@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
65677 return (long)atomic64_add_return(i, v);
65678 }
65679
65680+#ifdef CONFIG_PAX_REFCOUNT
65681+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
65682+{
65683+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65684+
65685+ return (long)atomic64_add_return_unchecked(i, v);
65686+}
65687+#endif
65688+
65689 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
65690 {
65691 atomic64_t *v = (atomic64_t *)l;
65692@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65693 return (long)atomic64_inc_return(v);
65694 }
65695
65696+#ifdef CONFIG_PAX_REFCOUNT
65697+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65698+{
65699+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65700+
65701+ return (long)atomic64_inc_return_unchecked(v);
65702+}
65703+#endif
65704+
65705 static inline long atomic_long_dec_return(atomic_long_t *l)
65706 {
65707 atomic64_t *v = (atomic64_t *)l;
65708@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65709
65710 typedef atomic_t atomic_long_t;
65711
65712+#ifdef CONFIG_PAX_REFCOUNT
65713+typedef atomic_unchecked_t atomic_long_unchecked_t;
65714+#else
65715+typedef atomic_t atomic_long_unchecked_t;
65716+#endif
65717+
65718 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
65719 static inline long atomic_long_read(atomic_long_t *l)
65720 {
65721@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65722 return (long)atomic_read(v);
65723 }
65724
65725+#ifdef CONFIG_PAX_REFCOUNT
65726+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65727+{
65728+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65729+
65730+ return (long)atomic_read_unchecked(v);
65731+}
65732+#endif
65733+
65734 static inline void atomic_long_set(atomic_long_t *l, long i)
65735 {
65736 atomic_t *v = (atomic_t *)l;
65737@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65738 atomic_set(v, i);
65739 }
65740
65741+#ifdef CONFIG_PAX_REFCOUNT
65742+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65743+{
65744+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65745+
65746+ atomic_set_unchecked(v, i);
65747+}
65748+#endif
65749+
65750 static inline void atomic_long_inc(atomic_long_t *l)
65751 {
65752 atomic_t *v = (atomic_t *)l;
65753@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65754 atomic_inc(v);
65755 }
65756
65757+#ifdef CONFIG_PAX_REFCOUNT
65758+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65759+{
65760+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65761+
65762+ atomic_inc_unchecked(v);
65763+}
65764+#endif
65765+
65766 static inline void atomic_long_dec(atomic_long_t *l)
65767 {
65768 atomic_t *v = (atomic_t *)l;
65769@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65770 atomic_dec(v);
65771 }
65772
65773+#ifdef CONFIG_PAX_REFCOUNT
65774+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65775+{
65776+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65777+
65778+ atomic_dec_unchecked(v);
65779+}
65780+#endif
65781+
65782 static inline void atomic_long_add(long i, atomic_long_t *l)
65783 {
65784 atomic_t *v = (atomic_t *)l;
65785@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65786 atomic_add(i, v);
65787 }
65788
65789+#ifdef CONFIG_PAX_REFCOUNT
65790+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65791+{
65792+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65793+
65794+ atomic_add_unchecked(i, v);
65795+}
65796+#endif
65797+
65798 static inline void atomic_long_sub(long i, atomic_long_t *l)
65799 {
65800 atomic_t *v = (atomic_t *)l;
65801@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
65802 atomic_sub(i, v);
65803 }
65804
65805+#ifdef CONFIG_PAX_REFCOUNT
65806+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
65807+{
65808+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65809+
65810+ atomic_sub_unchecked(i, v);
65811+}
65812+#endif
65813+
65814 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
65815 {
65816 atomic_t *v = (atomic_t *)l;
65817@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
65818 return (long)atomic_add_return(i, v);
65819 }
65820
65821+#ifdef CONFIG_PAX_REFCOUNT
65822+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
65823+{
65824+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65825+
65826+ return (long)atomic_add_return_unchecked(i, v);
65827+}
65828+
65829+#endif
65830+
65831 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
65832 {
65833 atomic_t *v = (atomic_t *)l;
65834@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65835 return (long)atomic_inc_return(v);
65836 }
65837
65838+#ifdef CONFIG_PAX_REFCOUNT
65839+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65840+{
65841+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65842+
65843+ return (long)atomic_inc_return_unchecked(v);
65844+}
65845+#endif
65846+
65847 static inline long atomic_long_dec_return(atomic_long_t *l)
65848 {
65849 atomic_t *v = (atomic_t *)l;
65850@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65851
65852 #endif /* BITS_PER_LONG == 64 */
65853
65854+#ifdef CONFIG_PAX_REFCOUNT
65855+static inline void pax_refcount_needs_these_functions(void)
65856+{
65857+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
65858+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
65859+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
65860+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
65861+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
65862+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
65863+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
65864+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
65865+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
65866+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
65867+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
65868+#ifdef CONFIG_X86
65869+ atomic_clear_mask_unchecked(0, NULL);
65870+ atomic_set_mask_unchecked(0, NULL);
65871+#endif
65872+
65873+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
65874+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
65875+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
65876+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
65877+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
65878+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
65879+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
65880+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
65881+}
65882+#else
65883+#define atomic_read_unchecked(v) atomic_read(v)
65884+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
65885+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
65886+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
65887+#define atomic_inc_unchecked(v) atomic_inc(v)
65888+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
65889+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
65890+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
65891+#define atomic_dec_unchecked(v) atomic_dec(v)
65892+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
65893+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
65894+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
65895+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
65896+
65897+#define atomic_long_read_unchecked(v) atomic_long_read(v)
65898+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
65899+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
65900+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
65901+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
65902+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
65903+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
65904+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
65905+#endif
65906+
65907 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
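
The block above is the heart of the PAX_REFCOUNT split: the normal atomic ops get overflow detection (a wrapped reference count is a classic exploitation primitive), while counters that may legitimately wrap opt out through the *_unchecked types. When PAX_REFCOUNT is off, the #else branch simply aliases every _unchecked op to its plain counterpart, and the never-called pax_refcount_needs_these_functions() apparently exists only to force a compile error if an architecture forgot to provide one of the _unchecked primitives. A minimal userspace sketch of the idea, using C11 atomics rather than the arch inline assembly the real patch relies on:

#include <limits.h>
#include <stdatomic.h>
#include <stdlib.h>

typedef struct { _Atomic int counter; } atomic_t;            /* checked  */
typedef struct { _Atomic int counter; } atomic_unchecked_t;  /* exempt   */

static void atomic_inc(atomic_t *v)
{
	/* C11 atomic signed arithmetic wraps silently; detect the wrap
	 * by looking at the value we incremented from */
	if (atomic_fetch_add(&v->counter, 1) == INT_MAX)
		abort();	/* PaX would log and kill the offending task */
}

static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	atomic_fetch_add(&v->counter, 1);	/* free-running, may wrap */
}
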
65908diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
65909index 1ced641..c896ee8 100644
65910--- a/include/asm-generic/atomic.h
65911+++ b/include/asm-generic/atomic.h
65912@@ -159,7 +159,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
65913 * Atomically clears the bits set in @mask from @v
65914 */
65915 #ifndef atomic_clear_mask
65916-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
65917+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
65918 {
65919 unsigned long flags;
65920
65921diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
65922index b18ce4f..2ee2843 100644
65923--- a/include/asm-generic/atomic64.h
65924+++ b/include/asm-generic/atomic64.h
65925@@ -16,6 +16,8 @@ typedef struct {
65926 long long counter;
65927 } atomic64_t;
65928
65929+typedef atomic64_t atomic64_unchecked_t;
65930+
65931 #define ATOMIC64_INIT(i) { (i) }
65932
65933 extern long long atomic64_read(const atomic64_t *v);
65934@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
65935 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
65936 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
65937
65938+#define atomic64_read_unchecked(v) atomic64_read(v)
65939+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
65940+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
65941+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
65942+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
65943+#define atomic64_inc_unchecked(v) atomic64_inc(v)
65944+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
65945+#define atomic64_dec_unchecked(v) atomic64_dec(v)
65946+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
65947+
65948 #endif /* _ASM_GENERIC_ATOMIC64_H */
65949diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
65950index 1bfcfe5..e04c5c9 100644
65951--- a/include/asm-generic/cache.h
65952+++ b/include/asm-generic/cache.h
65953@@ -6,7 +6,7 @@
65954 * cache lines need to provide their own cache.h.
65955 */
65956
65957-#define L1_CACHE_SHIFT 5
65958-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
65959+#define L1_CACHE_SHIFT 5UL
65960+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
65961
65962 #endif /* __ASM_GENERIC_CACHE_H */
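
The UL suffixes look cosmetic but change the type of the constants: 1 << n is a signed int, while 1UL << n is unsigned long, so expressions built from L1_CACHE_BYTES stay in unsigned long arithmetic, presumably to keep the size_overflow instrumentation elsewhere in this patch out of signed territory. The hazard the habit avoids is easiest to see with a larger shift count (L1_CACHE_SHIFT itself is only 5):

#include <stdio.h>

int main(void)
{
	unsigned long a = 1 << 31;	/* undefined in ISO C; gcc yields INT_MIN,
					   which sign-extends to 0xffffffff80000000
					   on LP64 when widened */
	unsigned long b = 1UL << 31;	/* well-defined: 0x80000000 */

	printf("%#lx %#lx\n", a, b);
	return 0;
}
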
65963diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
65964index 0d68a1e..b74a761 100644
65965--- a/include/asm-generic/emergency-restart.h
65966+++ b/include/asm-generic/emergency-restart.h
65967@@ -1,7 +1,7 @@
65968 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
65969 #define _ASM_GENERIC_EMERGENCY_RESTART_H
65970
65971-static inline void machine_emergency_restart(void)
65972+static inline __noreturn void machine_emergency_restart(void)
65973 {
65974 machine_restart(NULL);
65975 }
65976diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
65977index 90f99c7..00ce236 100644
65978--- a/include/asm-generic/kmap_types.h
65979+++ b/include/asm-generic/kmap_types.h
65980@@ -2,9 +2,9 @@
65981 #define _ASM_GENERIC_KMAP_TYPES_H
65982
65983 #ifdef __WITH_KM_FENCE
65984-# define KM_TYPE_NR 41
65985+# define KM_TYPE_NR 42
65986 #else
65987-# define KM_TYPE_NR 20
65988+# define KM_TYPE_NR 21
65989 #endif
65990
65991 #endif
65992diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
65993index 9ceb03b..62b0b8f 100644
65994--- a/include/asm-generic/local.h
65995+++ b/include/asm-generic/local.h
65996@@ -23,24 +23,37 @@ typedef struct
65997 atomic_long_t a;
65998 } local_t;
65999
66000+typedef struct {
66001+ atomic_long_unchecked_t a;
66002+} local_unchecked_t;
66003+
66004 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
66005
66006 #define local_read(l) atomic_long_read(&(l)->a)
66007+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
66008 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
66009+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
66010 #define local_inc(l) atomic_long_inc(&(l)->a)
66011+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
66012 #define local_dec(l) atomic_long_dec(&(l)->a)
66013+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
66014 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
66015+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
66016 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
66017+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
66018
66019 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
66020 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
66021 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
66022 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
66023 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
66024+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
66025 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
66026 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
66027+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
66028
66029 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
66030+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
66031 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
66032 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
66033 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
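
local_unchecked_t wraps atomic_long_unchecked_t in its own struct, so the checked and unchecked families stay distinct types even though their layout is identical, and mixing them up is a compile error rather than a silent conversion. Note also that local_cmpxchg_unchecked maps to the plain atomic_long_cmpxchg: compare-and-swap stores a caller-supplied value, so there is no increment for PAX_REFCOUNT to instrument. The wrapper-typing idea in isolation:

typedef struct { long counter; } atomic_long_t;
typedef struct { long counter; } atomic_long_unchecked_t;

typedef struct { atomic_long_t a; } local_t;
typedef struct { atomic_long_unchecked_t a; } local_unchecked_t;

void take_checked(local_t *l);

void example(local_unchecked_t *l)
{
	/* take_checked(l); -- rejected: incompatible pointer type,
	 * even though both structs contain a single long */
}
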
66034diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
66035index 725612b..9cc513a 100644
66036--- a/include/asm-generic/pgtable-nopmd.h
66037+++ b/include/asm-generic/pgtable-nopmd.h
66038@@ -1,14 +1,19 @@
66039 #ifndef _PGTABLE_NOPMD_H
66040 #define _PGTABLE_NOPMD_H
66041
66042-#ifndef __ASSEMBLY__
66043-
66044 #include <asm-generic/pgtable-nopud.h>
66045
66046-struct mm_struct;
66047-
66048 #define __PAGETABLE_PMD_FOLDED
66049
66050+#define PMD_SHIFT PUD_SHIFT
66051+#define PTRS_PER_PMD 1
66052+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
66053+#define PMD_MASK (~(PMD_SIZE-1))
66054+
66055+#ifndef __ASSEMBLY__
66056+
66057+struct mm_struct;
66058+
66059 /*
66060 * Having the pmd type consist of a pud gets the size right, and allows
66061 * us to conceptually access the pud entry that this pmd is folded into
66062@@ -16,11 +21,6 @@ struct mm_struct;
66063 */
66064 typedef struct { pud_t pud; } pmd_t;
66065
66066-#define PMD_SHIFT PUD_SHIFT
66067-#define PTRS_PER_PMD 1
66068-#define PMD_SIZE (1UL << PMD_SHIFT)
66069-#define PMD_MASK (~(PMD_SIZE-1))
66070-
66071 /*
66072 * The "pud_xxx()" functions here are trivial for a folded two-level
66073 * setup: the pmd is never bad, and a pmd always exists (as it's folded
66074diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
66075index 810431d..0ec4804f 100644
66076--- a/include/asm-generic/pgtable-nopud.h
66077+++ b/include/asm-generic/pgtable-nopud.h
66078@@ -1,10 +1,15 @@
66079 #ifndef _PGTABLE_NOPUD_H
66080 #define _PGTABLE_NOPUD_H
66081
66082-#ifndef __ASSEMBLY__
66083-
66084 #define __PAGETABLE_PUD_FOLDED
66085
66086+#define PUD_SHIFT PGDIR_SHIFT
66087+#define PTRS_PER_PUD 1
66088+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
66089+#define PUD_MASK (~(PUD_SIZE-1))
66090+
66091+#ifndef __ASSEMBLY__
66092+
66093 /*
66094 * Having the pud type consist of a pgd gets the size right, and allows
66095 * us to conceptually access the pgd entry that this pud is folded into
66096@@ -12,11 +17,6 @@
66097 */
66098 typedef struct { pgd_t pgd; } pud_t;
66099
66100-#define PUD_SHIFT PGDIR_SHIFT
66101-#define PTRS_PER_PUD 1
66102-#define PUD_SIZE (1UL << PUD_SHIFT)
66103-#define PUD_MASK (~(PUD_SIZE-1))
66104-
66105 /*
66106 * The "pgd_xxx()" functions here are trivial for a folded two-level
66107 * setup: the pud is never bad, and a pud always exists (as it's folded
66108@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
66109 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
66110
66111 #define pgd_populate(mm, pgd, pud) do { } while (0)
66112+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
66113 /*
66114 * (puds are folded into pgds so this doesn't get actually called,
66115 * but the define is needed for a generic inline function.)
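
Both the nopmd and nopud hunks hoist the PMD_*/PUD_* constants out of the #ifndef __ASSEMBLY__ guard so assembly files can use them, which is also why 1UL becomes _AC(1,UL): the assembler has no notion of C integer suffixes. _AC() is the standard kernel helper from include/uapi/linux/const.h:

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler sees a bare constant  */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C sees the suffixed constant    */
#endif

#define PMD_SIZE	(_AC(1,UL) << PMD_SHIFT)	/* 1UL << ... in C, 1 << ... in .S */
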
66116diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
66117index 5cf680a..4b74d62 100644
66118--- a/include/asm-generic/pgtable.h
66119+++ b/include/asm-generic/pgtable.h
66120@@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
66121 }
66122 #endif /* CONFIG_NUMA_BALANCING */
66123
66124+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
66125+static inline unsigned long pax_open_kernel(void) { return 0; }
66126+#endif
66127+
66128+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
66129+static inline unsigned long pax_close_kernel(void) { return 0; }
66130+#endif
66131+
66132 #endif /* CONFIG_MMU */
66133
66134 #endif /* !__ASSEMBLY__ */
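
pax_open_kernel()/pax_close_kernel() bracket the rare legitimate writes to data that KERNEXEC/CONSTIFY keep read-only; the generic fallbacks above are no-ops so common code can call them unconditionally. A simplified sketch of the x86 flavour, modelled on the PaX code with its sanity checks trimmed, which toggles the CR0.WP bit so supervisor writes temporarily ignore page-level write protection:

static inline unsigned long native_pax_open_kernel(void)
{
	unsigned long cr0;

	preempt_disable();
	barrier();
	cr0 = read_cr0() ^ X86_CR0_WP;	/* clear write-protect */
	write_cr0(cr0);
	return cr0 ^ X86_CR0_WP;	/* original CR0 for the caller */
}

static inline unsigned long native_pax_close_kernel(void)
{
	unsigned long cr0;

	cr0 = read_cr0() ^ X86_CR0_WP;	/* set write-protect again */
	write_cr0(cr0);
	barrier();
	preempt_enable_no_resched();
	return cr0 ^ X86_CR0_WP;
}
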
66135diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
66136index d1ea7ce..b1ebf2a 100644
66137--- a/include/asm-generic/vmlinux.lds.h
66138+++ b/include/asm-generic/vmlinux.lds.h
66139@@ -218,6 +218,7 @@
66140 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
66141 VMLINUX_SYMBOL(__start_rodata) = .; \
66142 *(.rodata) *(.rodata.*) \
66143+ *(.data..read_only) \
66144 *(__vermagic) /* Kernel version magic */ \
66145 . = ALIGN(8); \
66146 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
66147@@ -725,17 +726,18 @@
66148 * section in the linker script will go there too. @phdr should have
66149 * a leading colon.
66150 *
66151- * Note that this macros defines __per_cpu_load as an absolute symbol.
66152+ * Note that this macro defines per_cpu_load as an absolute symbol.
66153 * If there is no need to put the percpu section at a predetermined
66154 * address, use PERCPU_SECTION.
66155 */
66156 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
66157- VMLINUX_SYMBOL(__per_cpu_load) = .; \
66158- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
66159+ per_cpu_load = .; \
66160+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
66161 - LOAD_OFFSET) { \
66162+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
66163 PERCPU_INPUT(cacheline) \
66164 } phdr \
66165- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
66166+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
66167
66168 /**
66169 * PERCPU_SECTION - define output section for percpu area, simple version
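
The *(.data..read_only) input section folded into .rodata above is where __read_only variables land, so they are write-protected together with the rest of rodata once the kernel mappings are finalized, and can only be modified through the pax_open_kernel() window sketched earlier. A sketch assuming the PaX definition of __read_only (the cache.h hunk later in this patch supplies a generic fallback that degrades it to __read_mostly):

#define __read_only __attribute__((__section__(".data..read_only")))

static int hardening_enabled __read_only = 1;

static void set_hardening(int val)
{
	pax_open_kernel();
	hardening_enabled = val;	/* faults unless the window is open */
	pax_close_kernel();
}
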
66170diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
66171index 418d270..bfd2794 100644
66172--- a/include/crypto/algapi.h
66173+++ b/include/crypto/algapi.h
66174@@ -34,7 +34,7 @@ struct crypto_type {
66175 unsigned int maskclear;
66176 unsigned int maskset;
66177 unsigned int tfmsize;
66178-};
66179+} __do_const;
66180
66181 struct crypto_instance {
66182 struct crypto_alg alg;
66183diff --git a/include/drm/drmP.h b/include/drm/drmP.h
66184index fad21c9..ab858bc 100644
66185--- a/include/drm/drmP.h
66186+++ b/include/drm/drmP.h
66187@@ -72,6 +72,7 @@
66188 #include <linux/workqueue.h>
66189 #include <linux/poll.h>
66190 #include <asm/pgalloc.h>
66191+#include <asm/local.h>
66192 #include <drm/drm.h>
66193 #include <drm/drm_sarea.h>
66194
66195@@ -293,10 +294,12 @@ do { \
66196 * \param cmd command.
66197 * \param arg argument.
66198 */
66199-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
66200+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
66201+ struct drm_file *file_priv);
66202+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
66203 struct drm_file *file_priv);
66204
66205-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
66206+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
66207 unsigned long arg);
66208
66209 #define DRM_IOCTL_NR(n) _IOC_NR(n)
66210@@ -311,9 +314,9 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
66211 struct drm_ioctl_desc {
66212 unsigned int cmd;
66213 int flags;
66214- drm_ioctl_t *func;
66215+ drm_ioctl_t func;
66216 unsigned int cmd_drv;
66217-};
66218+} __do_const;
66219
66220 /**
66221 * Creates a driver or general drm_ioctl_desc array entry for the given
66222@@ -995,7 +998,7 @@ struct drm_info_list {
66223 int (*show)(struct seq_file*, void*); /** show callback */
66224 u32 driver_features; /**< Required driver features for this entry */
66225 void *data;
66226-};
66227+} __do_const;
66228
66229 /**
66230 * debugfs node structure. This structure represents a debugfs file.
66231@@ -1068,7 +1071,7 @@ struct drm_device {
66232
66233 /** \name Usage Counters */
66234 /*@{ */
66235- int open_count; /**< Outstanding files open */
66236+ local_t open_count; /**< Outstanding files open */
66237 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
66238 atomic_t vma_count; /**< Outstanding vma areas open */
66239 int buf_use; /**< Buffers in use -- cannot alloc */
66240@@ -1079,7 +1082,7 @@ struct drm_device {
66241 /*@{ */
66242 unsigned long counters;
66243 enum drm_stat_type types[15];
66244- atomic_t counts[15];
66245+ atomic_unchecked_t counts[15];
66246 /*@} */
66247
66248 struct list_head filelist;
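
Three related conversions in this drmP.h hunk: the ioctl descriptor tables become __do_const with the handler pointer itself const-qualified, the per-device statistics array becomes atomic_unchecked_t (it only counts events and may wrap), and open_count becomes a local_t. What the const function pointer buys, in miniature (hypothetical struct, not the real drm one):

struct mini_ioctl_desc {
	unsigned int cmd;
	int (* const func)(void *dev, void *data);	/* pointer is const */
};

static int mini_ioctl(void *dev, void *data) { return 0; }

static const struct mini_ioctl_desc mini_table[] = {
	{ 0x1234, mini_ioctl },
};

/* mini_table[0].func = evil_fn; -- rejected at compile time, and the
 * table lives in .rodata, so a stray kernel write primitive cannot
 * redirect the handler at run time either */
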
66249diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
66250index f43d556..94d9343 100644
66251--- a/include/drm/drm_crtc_helper.h
66252+++ b/include/drm/drm_crtc_helper.h
66253@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
66254 struct drm_connector *connector);
66255 /* disable encoder when not in use - more explicit than dpms off */
66256 void (*disable)(struct drm_encoder *encoder);
66257-};
66258+} __no_const;
66259
66260 /**
66261 * drm_connector_helper_funcs - helper operations for connectors
66262diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
66263index 72dcbe8..8db58d7 100644
66264--- a/include/drm/ttm/ttm_memory.h
66265+++ b/include/drm/ttm/ttm_memory.h
66266@@ -48,7 +48,7 @@
66267
66268 struct ttm_mem_shrink {
66269 int (*do_shrink) (struct ttm_mem_shrink *);
66270-};
66271+} __no_const;
66272
66273 /**
66274 * struct ttm_mem_global - Global memory accounting structure.
66275diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
66276index 4b840e8..155d235 100644
66277--- a/include/keys/asymmetric-subtype.h
66278+++ b/include/keys/asymmetric-subtype.h
66279@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
66280 /* Verify the signature on a key of this subtype (optional) */
66281 int (*verify_signature)(const struct key *key,
66282 const struct public_key_signature *sig);
66283-};
66284+} __do_const;
66285
66286 /**
66287 * asymmetric_key_subtype - Get the subtype from an asymmetric key
66288diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
66289index c1da539..1dcec55 100644
66290--- a/include/linux/atmdev.h
66291+++ b/include/linux/atmdev.h
66292@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
66293 #endif
66294
66295 struct k_atm_aal_stats {
66296-#define __HANDLE_ITEM(i) atomic_t i
66297+#define __HANDLE_ITEM(i) atomic_unchecked_t i
66298 __AAL_STAT_ITEMS
66299 #undef __HANDLE_ITEM
66300 };
66301@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
66302 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
66303 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
66304 struct module *owner;
66305-};
66306+} __do_const;
66307
66308 struct atmphy_ops {
66309 int (*start)(struct atm_dev *dev);
66310diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
66311index 0530b98..96a8ac0 100644
66312--- a/include/linux/binfmts.h
66313+++ b/include/linux/binfmts.h
66314@@ -73,8 +73,9 @@ struct linux_binfmt {
66315 int (*load_binary)(struct linux_binprm *);
66316 int (*load_shlib)(struct file *);
66317 int (*core_dump)(struct coredump_params *cprm);
66318+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
66319 unsigned long min_coredump; /* minimal dump size */
66320-};
66321+} __do_const;
66322
66323 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
66324
66325diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
66326index f94bc83..62b9cfe 100644
66327--- a/include/linux/blkdev.h
66328+++ b/include/linux/blkdev.h
66329@@ -1498,7 +1498,7 @@ struct block_device_operations {
66330 /* this callback is with swap_lock and sometimes page table lock held */
66331 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
66332 struct module *owner;
66333-};
66334+} __do_const;
66335
66336 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
66337 unsigned long);
66338diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
66339index 7c2e030..b72475d 100644
66340--- a/include/linux/blktrace_api.h
66341+++ b/include/linux/blktrace_api.h
66342@@ -23,7 +23,7 @@ struct blk_trace {
66343 struct dentry *dir;
66344 struct dentry *dropped_file;
66345 struct dentry *msg_file;
66346- atomic_t dropped;
66347+ atomic_unchecked_t dropped;
66348 };
66349
66350 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
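
blk_trace's dropped counter is typical of what this patch converts to atomic_unchecked_t: it feeds statistics only, so wrapping is harmless and PAX_REFCOUNT's overflow trap would be a false positive. Genuine reference counts keep the checked type. The rule of thumb, sketched:

static atomic_unchecked_t events_dropped;	/* statistic: may wrap       */
static atomic_t           object_refcount;	/* refcount: overflow == bug */

void on_drop(void)
{
	atomic_inc_unchecked(&events_dropped);	/* never traps */
}

void object_get(void)
{
	atomic_inc(&object_refcount);	/* traps on overflow under PAX_REFCOUNT */
}
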
66351diff --git a/include/linux/cache.h b/include/linux/cache.h
66352index 4c57065..4307975 100644
66353--- a/include/linux/cache.h
66354+++ b/include/linux/cache.h
66355@@ -16,6 +16,10 @@
66356 #define __read_mostly
66357 #endif
66358
66359+#ifndef __read_only
66360+#define __read_only __read_mostly
66361+#endif
66362+
66363 #ifndef ____cacheline_aligned
66364 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
66365 #endif
66366diff --git a/include/linux/capability.h b/include/linux/capability.h
66367index 98503b7..cc36d18 100644
66368--- a/include/linux/capability.h
66369+++ b/include/linux/capability.h
66370@@ -211,8 +211,13 @@ extern bool capable(int cap);
66371 extern bool ns_capable(struct user_namespace *ns, int cap);
66372 extern bool nsown_capable(int cap);
66373 extern bool inode_capable(const struct inode *inode, int cap);
66374+extern bool capable_nolog(int cap);
66375+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
66376+extern bool inode_capable_nolog(const struct inode *inode, int cap);
66377
66378 /* audit system wants to get cap info from files as well */
66379 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
66380
66381+extern int is_privileged_binary(const struct dentry *dentry);
66382+
66383 #endif /* !_LINUX_CAPABILITY_H */
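
The _nolog variants make the same access decision as their counterparts but skip the audit record; grsecurity calls them for capability checks it performs opportunistically, where a denial is the expected outcome and should not flood the log. A sketch of the likely shape, assuming it mirrors how capable() wraps ns_capable() in kernel/capability.c:

bool capable_nolog(int cap)
{
	return ns_capable_nolog(&init_user_ns, cap);
}
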
66384diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
66385index 8609d57..86e4d79 100644
66386--- a/include/linux/cdrom.h
66387+++ b/include/linux/cdrom.h
66388@@ -87,7 +87,6 @@ struct cdrom_device_ops {
66389
66390 /* driver specifications */
66391 const int capability; /* capability flags */
66392- int n_minors; /* number of active minor devices */
66393 /* handle uniform packets for scsi type devices (scsi,atapi) */
66394 int (*generic_packet) (struct cdrom_device_info *,
66395 struct packet_command *);
66396diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
66397index 42e55de..1cd0e66 100644
66398--- a/include/linux/cleancache.h
66399+++ b/include/linux/cleancache.h
66400@@ -31,7 +31,7 @@ struct cleancache_ops {
66401 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
66402 void (*invalidate_inode)(int, struct cleancache_filekey);
66403 void (*invalidate_fs)(int);
66404-};
66405+} __no_const;
66406
66407 extern struct cleancache_ops
66408 cleancache_register_ops(struct cleancache_ops *ops);
66409diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
66410index 662fd1b..e801992 100644
66411--- a/include/linux/compiler-gcc4.h
66412+++ b/include/linux/compiler-gcc4.h
66413@@ -34,6 +34,21 @@
66414 #define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
66415
66416 #if __GNUC_MINOR__ >= 5
66417+
66418+#ifdef CONSTIFY_PLUGIN
66419+#define __no_const __attribute__((no_const))
66420+#define __do_const __attribute__((do_const))
66421+#endif
66422+
66423+#ifdef SIZE_OVERFLOW_PLUGIN
66424+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
66425+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
66426+#endif
66427+
66428+#ifdef LATENT_ENTROPY_PLUGIN
66429+#define __latent_entropy __attribute__((latent_entropy))
66430+#endif
66431+
66432 /*
66433 * Mark a position in code as unreachable. This can be used to
66434 * suppress control flow warnings after asm blocks that transfer
66435@@ -49,6 +64,11 @@
66436 #define __noclone __attribute__((__noclone__))
66437
66438 #endif
66439+
66440+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
66441+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
66442+#define __bos0(ptr) __bos((ptr), 0)
66443+#define __bos1(ptr) __bos((ptr), 1)
66444 #endif
66445
66446 #if __GNUC_MINOR__ >= 6
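
The __bos()/__bos0()/__bos1() helpers wrap gcc's __builtin_object_size(), which reports the size of the object a pointer refers to when the compiler can prove it, and (size_t)-1 when it cannot. That is the primitive behind FORTIFY-style checked copies, e.g.:

#include <string.h>

#define __bos0(ptr) __builtin_object_size((ptr), 0)

static inline void checked_copy(void *dst, const void *src, size_t n)
{
	size_t sz = __bos0(dst);	/* (size_t)-1 when unknown */

	if (sz != (size_t)-1 && n > sz)
		__builtin_trap();	/* would overflow a known-size object */
	memcpy(dst, src, n);
}

/* with char buf[16], checked_copy(buf, src, 32) traps: __bos0(buf) == 16 */
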
66447diff --git a/include/linux/compiler.h b/include/linux/compiler.h
66448index dd852b7..72924c0 100644
66449--- a/include/linux/compiler.h
66450+++ b/include/linux/compiler.h
66451@@ -5,11 +5,14 @@
66452
66453 #ifdef __CHECKER__
66454 # define __user __attribute__((noderef, address_space(1)))
66455+# define __force_user __force __user
66456 # define __kernel __attribute__((address_space(0)))
66457+# define __force_kernel __force __kernel
66458 # define __safe __attribute__((safe))
66459 # define __force __attribute__((force))
66460 # define __nocast __attribute__((nocast))
66461 # define __iomem __attribute__((noderef, address_space(2)))
66462+# define __force_iomem __force __iomem
66463 # define __must_hold(x) __attribute__((context(x,1,1)))
66464 # define __acquires(x) __attribute__((context(x,0,1)))
66465 # define __releases(x) __attribute__((context(x,1,0)))
66466@@ -17,20 +20,48 @@
66467 # define __release(x) __context__(x,-1)
66468 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
66469 # define __percpu __attribute__((noderef, address_space(3)))
66470+# define __force_percpu __force __percpu
66471 #ifdef CONFIG_SPARSE_RCU_POINTER
66472 # define __rcu __attribute__((noderef, address_space(4)))
66473+# define __force_rcu __force __rcu
66474 #else
66475 # define __rcu
66476+# define __force_rcu
66477 #endif
66478 extern void __chk_user_ptr(const volatile void __user *);
66479 extern void __chk_io_ptr(const volatile void __iomem *);
66480+#elif defined(CHECKER_PLUGIN)
66481+//# define __user
66482+//# define __force_user
66483+//# define __kernel
66484+//# define __force_kernel
66485+# define __safe
66486+# define __force
66487+# define __nocast
66488+# define __iomem
66489+# define __force_iomem
66490+# define __chk_user_ptr(x) (void)0
66491+# define __chk_io_ptr(x) (void)0
66492+# define __builtin_warning(x, y...) (1)
66493+# define __acquires(x)
66494+# define __releases(x)
66495+# define __acquire(x) (void)0
66496+# define __release(x) (void)0
66497+# define __cond_lock(x,c) (c)
66498+# define __percpu
66499+# define __force_percpu
66500+# define __rcu
66501+# define __force_rcu
66502 #else
66503 # define __user
66504+# define __force_user
66505 # define __kernel
66506+# define __force_kernel
66507 # define __safe
66508 # define __force
66509 # define __nocast
66510 # define __iomem
66511+# define __force_iomem
66512 # define __chk_user_ptr(x) (void)0
66513 # define __chk_io_ptr(x) (void)0
66514 # define __builtin_warning(x, y...) (1)
66515@@ -41,7 +72,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
66516 # define __release(x) (void)0
66517 # define __cond_lock(x,c) (c)
66518 # define __percpu
66519+# define __force_percpu
66520 # define __rcu
66521+# define __force_rcu
66522 #endif
66523
66524 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
66525@@ -275,6 +308,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66526 # define __attribute_const__ /* unimplemented */
66527 #endif
66528
66529+#ifndef __no_const
66530+# define __no_const
66531+#endif
66532+
66533+#ifndef __do_const
66534+# define __do_const
66535+#endif
66536+
66537+#ifndef __size_overflow
66538+# define __size_overflow(...)
66539+#endif
66540+
66541+#ifndef __intentional_overflow
66542+# define __intentional_overflow(...)
66543+#endif
66544+
66545+#ifndef __latent_entropy
66546+# define __latent_entropy
66547+#endif
66548+
66549 /*
66550 * Tell gcc if a function is cold. The compiler will assume any path
66551 * directly leading to the call is unlikely.
66552@@ -284,6 +337,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66553 #define __cold
66554 #endif
66555
66556+#ifndef __alloc_size
66557+#define __alloc_size(...)
66558+#endif
66559+
66560+#ifndef __bos
66561+#define __bos(ptr, arg)
66562+#endif
66563+
66564+#ifndef __bos0
66565+#define __bos0(ptr)
66566+#endif
66567+
66568+#ifndef __bos1
66569+#define __bos1(ptr)
66570+#endif
66571+
66572 /* Simple shorthand for a section definition */
66573 #ifndef __section
66574 # define __section(S) __attribute__ ((__section__(#S)))
66575@@ -323,6 +392,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66576 * use is to mediate communication between process-level code and irq/NMI
66577 * handlers, all running on the same CPU.
66578 */
66579-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
66580+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
66581+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
66582
66583 #endif /* __LINUX_COMPILER_H */
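
Casting through a pointer-to-const makes every write through ACCESS_ONCE() a compile error, so all writes in the tree must be audited and converted to the new ACCESS_ONCE_RW(); reads are unaffected:

#define ACCESS_ONCE(x)    (*(volatile const typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

static int shared_flag;

static int poll_flag(void)
{
	int v = ACCESS_ONCE(shared_flag);	/* ok: volatile read       */

	ACCESS_ONCE_RW(shared_flag) = 0;	/* ok: explicitly writable */
	/* ACCESS_ONCE(shared_flag) = 0; */	/* error: assignment of
						   read-only location     */
	return v;
}
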
66584diff --git a/include/linux/configfs.h b/include/linux/configfs.h
66585index 34025df..d94bbbc 100644
66586--- a/include/linux/configfs.h
66587+++ b/include/linux/configfs.h
66588@@ -125,7 +125,7 @@ struct configfs_attribute {
66589 const char *ca_name;
66590 struct module *ca_owner;
66591 umode_t ca_mode;
66592-};
66593+} __do_const;
66594
66595 /*
66596 * Users often need to create attribute structures for their configurable
66597diff --git a/include/linux/cpu.h b/include/linux/cpu.h
66598index ce7a074..01ab8ac 100644
66599--- a/include/linux/cpu.h
66600+++ b/include/linux/cpu.h
66601@@ -115,7 +115,7 @@ enum {
66602 /* Need to know about CPUs going up/down? */
66603 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
66604 #define cpu_notifier(fn, pri) { \
66605- static struct notifier_block fn##_nb __cpuinitdata = \
66606+ static struct notifier_block fn##_nb = \
66607 { .notifier_call = fn, .priority = pri }; \
66608 register_cpu_notifier(&fn##_nb); \
66609 }
66610diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
66611index a55b88e..fba90c5 100644
66612--- a/include/linux/cpufreq.h
66613+++ b/include/linux/cpufreq.h
66614@@ -240,7 +240,7 @@ struct cpufreq_driver {
66615 int (*suspend) (struct cpufreq_policy *policy);
66616 int (*resume) (struct cpufreq_policy *policy);
66617 struct freq_attr **attr;
66618-};
66619+} __do_const;
66620
66621 /* flags */
66622
66623@@ -299,6 +299,7 @@ struct global_attr {
66624 ssize_t (*store)(struct kobject *a, struct attribute *b,
66625 const char *c, size_t count);
66626 };
66627+typedef struct global_attr __no_const global_attr_no_const;
66628
66629 #define define_one_global_ro(_name) \
66630 static struct global_attr _name = \
66631diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
66632index 24cd1037..20a63aae 100644
66633--- a/include/linux/cpuidle.h
66634+++ b/include/linux/cpuidle.h
66635@@ -54,7 +54,8 @@ struct cpuidle_state {
66636 int index);
66637
66638 int (*enter_dead) (struct cpuidle_device *dev, int index);
66639-};
66640+} __do_const;
66641+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
66642
66643 /* Idle State Flags */
66644 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
66645@@ -216,7 +217,7 @@ struct cpuidle_governor {
66646 void (*reflect) (struct cpuidle_device *dev, int index);
66647
66648 struct module *owner;
66649-};
66650+} __do_const;
66651
66652 #ifdef CONFIG_CPU_IDLE
66653
66654diff --git a/include/linux/cred.h b/include/linux/cred.h
66655index 04421e8..6bce4ef 100644
66656--- a/include/linux/cred.h
66657+++ b/include/linux/cred.h
66658@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
66659 static inline void validate_process_creds(void)
66660 {
66661 }
66662+static inline void validate_task_creds(struct task_struct *task)
66663+{
66664+}
66665 #endif
66666
66667 /**
66668diff --git a/include/linux/crypto.h b/include/linux/crypto.h
66669index b92eadf..b4ecdc1 100644
66670--- a/include/linux/crypto.h
66671+++ b/include/linux/crypto.h
66672@@ -373,7 +373,7 @@ struct cipher_tfm {
66673 const u8 *key, unsigned int keylen);
66674 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66675 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66676-};
66677+} __no_const;
66678
66679 struct hash_tfm {
66680 int (*init)(struct hash_desc *desc);
66681@@ -394,13 +394,13 @@ struct compress_tfm {
66682 int (*cot_decompress)(struct crypto_tfm *tfm,
66683 const u8 *src, unsigned int slen,
66684 u8 *dst, unsigned int *dlen);
66685-};
66686+} __no_const;
66687
66688 struct rng_tfm {
66689 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
66690 unsigned int dlen);
66691 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
66692-};
66693+} __no_const;
66694
66695 #define crt_ablkcipher crt_u.ablkcipher
66696 #define crt_aead crt_u.aead
66697diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
66698index 7925bf0..d5143d2 100644
66699--- a/include/linux/decompress/mm.h
66700+++ b/include/linux/decompress/mm.h
66701@@ -77,7 +77,7 @@ static void free(void *where)
66702 * warnings when not needed (indeed large_malloc / large_free are not
66703 * needed by inflate */
66704
66705-#define malloc(a) kmalloc(a, GFP_KERNEL)
66706+#define malloc(a) kmalloc((a), GFP_KERNEL)
66707 #define free(a) kfree(a)
66708
66709 #define large_malloc(a) vmalloc(a)
66710diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
66711index e83ef39..33e0eb3 100644
66712--- a/include/linux/devfreq.h
66713+++ b/include/linux/devfreq.h
66714@@ -114,7 +114,7 @@ struct devfreq_governor {
66715 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
66716 int (*event_handler)(struct devfreq *devfreq,
66717 unsigned int event, void *data);
66718-};
66719+} __do_const;
66720
66721 /**
66722 * struct devfreq - Device devfreq structure
66723diff --git a/include/linux/device.h b/include/linux/device.h
66724index 43dcda9..7a1fb65 100644
66725--- a/include/linux/device.h
66726+++ b/include/linux/device.h
66727@@ -294,7 +294,7 @@ struct subsys_interface {
66728 struct list_head node;
66729 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
66730 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
66731-};
66732+} __do_const;
66733
66734 int subsys_interface_register(struct subsys_interface *sif);
66735 void subsys_interface_unregister(struct subsys_interface *sif);
66736@@ -474,7 +474,7 @@ struct device_type {
66737 void (*release)(struct device *dev);
66738
66739 const struct dev_pm_ops *pm;
66740-};
66741+} __do_const;
66742
66743 /* interface for exporting device attributes */
66744 struct device_attribute {
66745@@ -484,11 +484,12 @@ struct device_attribute {
66746 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
66747 const char *buf, size_t count);
66748 };
66749+typedef struct device_attribute __no_const device_attribute_no_const;
66750
66751 struct dev_ext_attribute {
66752 struct device_attribute attr;
66753 void *var;
66754-};
66755+} __do_const;
66756
66757 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
66758 char *buf);
66759diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
66760index 94af418..b1ca7a2 100644
66761--- a/include/linux/dma-mapping.h
66762+++ b/include/linux/dma-mapping.h
66763@@ -54,7 +54,7 @@ struct dma_map_ops {
66764 u64 (*get_required_mask)(struct device *dev);
66765 #endif
66766 int is_phys;
66767-};
66768+} __do_const;
66769
66770 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
66771
66772diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
66773index d3201e4..8281e63 100644
66774--- a/include/linux/dmaengine.h
66775+++ b/include/linux/dmaengine.h
66776@@ -1018,9 +1018,9 @@ struct dma_pinned_list {
66777 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
66778 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
66779
66780-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
66781+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
66782 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
66783-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
66784+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
66785 struct dma_pinned_list *pinned_list, struct page *page,
66786 unsigned int offset, size_t len);
66787
66788diff --git a/include/linux/efi.h b/include/linux/efi.h
66789index 7a9498a..155713d 100644
66790--- a/include/linux/efi.h
66791+++ b/include/linux/efi.h
66792@@ -733,6 +733,7 @@ struct efivar_operations {
66793 efi_set_variable_t *set_variable;
66794 efi_query_variable_info_t *query_variable_info;
66795 };
66796+typedef struct efivar_operations __no_const efivar_operations_no_const;
66797
66798 struct efivars {
66799 /*
66800diff --git a/include/linux/elf.h b/include/linux/elf.h
66801index 8c9048e..16a4665 100644
66802--- a/include/linux/elf.h
66803+++ b/include/linux/elf.h
66804@@ -20,6 +20,7 @@ extern Elf32_Dyn _DYNAMIC [];
66805 #define elf_note elf32_note
66806 #define elf_addr_t Elf32_Off
66807 #define Elf_Half Elf32_Half
66808+#define elf_dyn Elf32_Dyn
66809
66810 #else
66811
66812@@ -30,6 +31,7 @@ extern Elf64_Dyn _DYNAMIC [];
66813 #define elf_note elf64_note
66814 #define elf_addr_t Elf64_Off
66815 #define Elf_Half Elf64_Half
66816+#define elf_dyn Elf64_Dyn
66817
66818 #endif
66819
66820diff --git a/include/linux/extcon.h b/include/linux/extcon.h
66821index fcb51c8..bdafcf6 100644
66822--- a/include/linux/extcon.h
66823+++ b/include/linux/extcon.h
66824@@ -134,7 +134,7 @@ struct extcon_dev {
66825 /* /sys/class/extcon/.../mutually_exclusive/... */
66826 struct attribute_group attr_g_muex;
66827 struct attribute **attrs_muex;
66828- struct device_attribute *d_attrs_muex;
66829+ device_attribute_no_const *d_attrs_muex;
66830 };
66831
66832 /**
66833diff --git a/include/linux/fb.h b/include/linux/fb.h
66834index c7a9571..02eeffe 100644
66835--- a/include/linux/fb.h
66836+++ b/include/linux/fb.h
66837@@ -302,7 +302,7 @@ struct fb_ops {
66838 /* called at KDB enter and leave time to prepare the console */
66839 int (*fb_debug_enter)(struct fb_info *info);
66840 int (*fb_debug_leave)(struct fb_info *info);
66841-};
66842+} __do_const;
66843
66844 #ifdef CONFIG_FB_TILEBLITTING
66845 #define FB_TILE_CURSOR_NONE 0
66846diff --git a/include/linux/filter.h b/include/linux/filter.h
66847index c45eabc..baa0be5 100644
66848--- a/include/linux/filter.h
66849+++ b/include/linux/filter.h
66850@@ -20,6 +20,7 @@ struct compat_sock_fprog {
66851
66852 struct sk_buff;
66853 struct sock;
66854+struct bpf_jit_work;
66855
66856 struct sk_filter
66857 {
66858@@ -27,6 +28,9 @@ struct sk_filter
66859 unsigned int len; /* Number of filter blocks */
66860 unsigned int (*bpf_func)(const struct sk_buff *skb,
66861 const struct sock_filter *filter);
66862+#ifdef CONFIG_BPF_JIT
66863+ struct bpf_jit_work *work;
66864+#endif
66865 struct rcu_head rcu;
66866 struct sock_filter insns[0];
66867 };
66868diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
66869index 3044254..9767f41 100644
66870--- a/include/linux/frontswap.h
66871+++ b/include/linux/frontswap.h
66872@@ -11,7 +11,7 @@ struct frontswap_ops {
66873 int (*load)(unsigned, pgoff_t, struct page *);
66874 void (*invalidate_page)(unsigned, pgoff_t);
66875 void (*invalidate_area)(unsigned);
66876-};
66877+} __no_const;
66878
66879 extern bool frontswap_enabled;
66880 extern struct frontswap_ops
66881diff --git a/include/linux/fs.h b/include/linux/fs.h
66882index 7617ee0..b575199 100644
66883--- a/include/linux/fs.h
66884+++ b/include/linux/fs.h
66885@@ -1541,7 +1541,8 @@ struct file_operations {
66886 long (*fallocate)(struct file *file, int mode, loff_t offset,
66887 loff_t len);
66888 int (*show_fdinfo)(struct seq_file *m, struct file *f);
66889-};
66890+} __do_const;
66891+typedef struct file_operations __no_const file_operations_no_const;
66892
66893 struct inode_operations {
66894 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
66895@@ -2665,4 +2666,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
66896 inode->i_flags |= S_NOSEC;
66897 }
66898
66899+static inline bool is_sidechannel_device(const struct inode *inode)
66900+{
66901+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
66902+ umode_t mode = inode->i_mode;
66903+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
66904+#else
66905+ return false;
66906+#endif
66907+}
66908+
66909 #endif /* _LINUX_FS_H */
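
Two separate changes to fs.h: file_operations becomes __do_const, with a __no_const alias type for the few users that genuinely assemble their ops at run time, and is_sidechannel_device() is added for the fsnotify hooks further down, which stop reporting access to world-readable/writable device nodes so an unprivileged inotify/fanotify listener cannot observe, say, tty activity. The alias-type escape hatch in use (hypothetical driver; my_read is a placeholder):

typedef struct file_operations __no_const file_operations_no_const;

static file_operations_no_const dyn_fops;	/* writable instance */

static void init_dyn_fops(void)
{
	dyn_fops.owner = THIS_MODULE;
	dyn_fops.read  = my_read;	/* legal: the alias is __no_const */
}
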
66910diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
66911index d0ae3a8..0244b34 100644
66912--- a/include/linux/fs_struct.h
66913+++ b/include/linux/fs_struct.h
66914@@ -6,7 +6,7 @@
66915 #include <linux/seqlock.h>
66916
66917 struct fs_struct {
66918- int users;
66919+ atomic_t users;
66920 spinlock_t lock;
66921 seqcount_t seq;
66922 int umask;
66923diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
66924index 5dfa0aa..6acf322 100644
66925--- a/include/linux/fscache-cache.h
66926+++ b/include/linux/fscache-cache.h
66927@@ -112,7 +112,7 @@ struct fscache_operation {
66928 fscache_operation_release_t release;
66929 };
66930
66931-extern atomic_t fscache_op_debug_id;
66932+extern atomic_unchecked_t fscache_op_debug_id;
66933 extern void fscache_op_work_func(struct work_struct *work);
66934
66935 extern void fscache_enqueue_operation(struct fscache_operation *);
66936@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
66937 INIT_WORK(&op->work, fscache_op_work_func);
66938 atomic_set(&op->usage, 1);
66939 op->state = FSCACHE_OP_ST_INITIALISED;
66940- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
66941+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
66942 op->processor = processor;
66943 op->release = release;
66944 INIT_LIST_HEAD(&op->pend_link);
66945diff --git a/include/linux/fscache.h b/include/linux/fscache.h
66946index 7a08623..4c07b0f 100644
66947--- a/include/linux/fscache.h
66948+++ b/include/linux/fscache.h
66949@@ -152,7 +152,7 @@ struct fscache_cookie_def {
66950 * - this is mandatory for any object that may have data
66951 */
66952 void (*now_uncached)(void *cookie_netfs_data);
66953-};
66954+} __do_const;
66955
66956 /*
66957 * fscache cached network filesystem type
66958diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
66959index 0fbfb46..508eb0d 100644
66960--- a/include/linux/fsnotify.h
66961+++ b/include/linux/fsnotify.h
66962@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
66963 struct inode *inode = path->dentry->d_inode;
66964 __u32 mask = FS_ACCESS;
66965
66966+ if (is_sidechannel_device(inode))
66967+ return;
66968+
66969 if (S_ISDIR(inode->i_mode))
66970 mask |= FS_ISDIR;
66971
66972@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
66973 struct inode *inode = path->dentry->d_inode;
66974 __u32 mask = FS_MODIFY;
66975
66976+ if (is_sidechannel_device(inode))
66977+ return;
66978+
66979 if (S_ISDIR(inode->i_mode))
66980 mask |= FS_ISDIR;
66981
66982@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
66983 */
66984 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
66985 {
66986- return kstrdup(name, GFP_KERNEL);
66987+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
66988 }
66989
66990 /*
66991diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
66992index a3d4895..ddd2a50 100644
66993--- a/include/linux/ftrace_event.h
66994+++ b/include/linux/ftrace_event.h
66995@@ -272,7 +272,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
66996 extern int trace_add_event_call(struct ftrace_event_call *call);
66997 extern void trace_remove_event_call(struct ftrace_event_call *call);
66998
66999-#define is_signed_type(type) (((type)(-1)) < 0)
67000+#define is_signed_type(type) (((type)(-1)) < (type)1)
67001
67002 int trace_set_clr_event(const char *system, const char *event, int set);
67003
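
The rewritten is_signed_type() returns the same answer for every integer type, including bool, where (bool)(-1) is 1; comparing against (type)1 instead of 0 presumably sidesteps gcc's "comparison of unsigned expression < 0 is always false" warning. A quick check:

#include <stdbool.h>
#include <stdio.h>

#define is_signed_type(type) (((type)(-1)) < (type)1)

int main(void)
{
	printf("int:      %d\n", is_signed_type(int));		/* 1 */
	printf("unsigned: %d\n", is_signed_type(unsigned));	/* 0 */
	printf("bool:     %d\n", is_signed_type(bool));		/* 0 */
	return 0;
}
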
67004diff --git a/include/linux/genhd.h b/include/linux/genhd.h
67005index 79b8bba..86b539e 100644
67006--- a/include/linux/genhd.h
67007+++ b/include/linux/genhd.h
67008@@ -194,7 +194,7 @@ struct gendisk {
67009 struct kobject *slave_dir;
67010
67011 struct timer_rand_state *random;
67012- atomic_t sync_io; /* RAID */
67013+ atomic_unchecked_t sync_io; /* RAID */
67014 struct disk_events *ev;
67015 #ifdef CONFIG_BLK_DEV_INTEGRITY
67016 struct blk_integrity *integrity;
67017diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
67018index 023bc34..b02b46a 100644
67019--- a/include/linux/genl_magic_func.h
67020+++ b/include/linux/genl_magic_func.h
67021@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
67022 },
67023
67024 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
67025-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
67026+static struct genl_ops ZZZ_genl_ops[] = {
67027 #include GENL_MAGIC_INCLUDE_FILE
67028 };
67029
67030diff --git a/include/linux/gfp.h b/include/linux/gfp.h
67031index 0f615eb..5c3832f 100644
67032--- a/include/linux/gfp.h
67033+++ b/include/linux/gfp.h
67034@@ -35,6 +35,13 @@ struct vm_area_struct;
67035 #define ___GFP_NO_KSWAPD 0x400000u
67036 #define ___GFP_OTHER_NODE 0x800000u
67037 #define ___GFP_WRITE 0x1000000u
67038+
67039+#ifdef CONFIG_PAX_USERCOPY_SLABS
67040+#define ___GFP_USERCOPY 0x2000000u
67041+#else
67042+#define ___GFP_USERCOPY 0
67043+#endif
67044+
67045 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
67046
67047 /*
67048@@ -92,6 +99,7 @@ struct vm_area_struct;
67049 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
67050 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
67051 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
67052+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
67053
67054 /*
67055 * This may seem redundant, but it's a way of annotating false positives vs.
67056@@ -99,7 +107,7 @@ struct vm_area_struct;
67057 */
67058 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
67059
67060-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
67061+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
67062 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
67063
67064 /* This equals 0, but use constants in case they ever change */
67065@@ -153,6 +161,8 @@ struct vm_area_struct;
67066 /* 4GB DMA on some platforms */
67067 #define GFP_DMA32 __GFP_DMA32
67068
67069+#define GFP_USERCOPY __GFP_USERCOPY
67070+
67071 /* Convert GFP flags to their corresponding migrate type */
67072 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
67073 {
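
GFP_USERCOPY tags allocations whose contents are expected to cross the user/kernel boundary; under PAX_USERCOPY_SLABS such objects come from dedicated slab caches, so the usercopy checks can refuse copies into or out of slab objects that were never marked for it. Hypothetical call site:

static long recv_blob(void __user *ubuf, size_t len)
{
	long ret = 0;
	void *buf = kmalloc(len, GFP_KERNEL | GFP_USERCOPY);

	if (!buf)
		return -ENOMEM;
	if (copy_from_user(buf, ubuf, len))	/* allowed: usercopy slab */
		ret = -EFAULT;
	kfree(buf);
	return ret;
}
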
67074diff --git a/include/linux/gracl.h b/include/linux/gracl.h
67075new file mode 100644
67076index 0000000..ebe6d72
67077--- /dev/null
67078+++ b/include/linux/gracl.h
67079@@ -0,0 +1,319 @@
67080+#ifndef GR_ACL_H
67081+#define GR_ACL_H
67082+
67083+#include <linux/grdefs.h>
67084+#include <linux/resource.h>
67085+#include <linux/capability.h>
67086+#include <linux/dcache.h>
67087+#include <asm/resource.h>
67088+
67089+/* Major status information */
67090+
67091+#define GR_VERSION "grsecurity 2.9.1"
67092+#define GRSECURITY_VERSION 0x2901
67093+
67094+enum {
67095+ GR_SHUTDOWN = 0,
67096+ GR_ENABLE = 1,
67097+ GR_SPROLE = 2,
67098+ GR_RELOAD = 3,
67099+ GR_SEGVMOD = 4,
67100+ GR_STATUS = 5,
67101+ GR_UNSPROLE = 6,
67102+ GR_PASSSET = 7,
67103+ GR_SPROLEPAM = 8,
67104+};
67105+
67106+/* Password setup definitions
67107+ * kernel/grhash.c */
67108+enum {
67109+ GR_PW_LEN = 128,
67110+ GR_SALT_LEN = 16,
67111+ GR_SHA_LEN = 32,
67112+};
67113+
67114+enum {
67115+ GR_SPROLE_LEN = 64,
67116+};
67117+
67118+enum {
67119+ GR_NO_GLOB = 0,
67120+ GR_REG_GLOB,
67121+ GR_CREATE_GLOB
67122+};
67123+
67124+#define GR_NLIMITS 32
67125+
67126+/* Begin Data Structures */
67127+
67128+struct sprole_pw {
67129+ unsigned char *rolename;
67130+ unsigned char salt[GR_SALT_LEN];
67131+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
67132+};
67133+
67134+struct name_entry {
67135+ __u32 key;
67136+ ino_t inode;
67137+ dev_t device;
67138+ char *name;
67139+ __u16 len;
67140+ __u8 deleted;
67141+ struct name_entry *prev;
67142+ struct name_entry *next;
67143+};
67144+
67145+struct inodev_entry {
67146+ struct name_entry *nentry;
67147+ struct inodev_entry *prev;
67148+ struct inodev_entry *next;
67149+};
67150+
67151+struct acl_role_db {
67152+ struct acl_role_label **r_hash;
67153+ __u32 r_size;
67154+};
67155+
67156+struct inodev_db {
67157+ struct inodev_entry **i_hash;
67158+ __u32 i_size;
67159+};
67160+
67161+struct name_db {
67162+ struct name_entry **n_hash;
67163+ __u32 n_size;
67164+};
67165+
67166+struct crash_uid {
67167+ uid_t uid;
67168+ unsigned long expires;
67169+};
67170+
67171+struct gr_hash_struct {
67172+ void **table;
67173+ void **nametable;
67174+ void *first;
67175+ __u32 table_size;
67176+ __u32 used_size;
67177+ int type;
67178+};
67179+
67180+/* Userspace Grsecurity ACL data structures */
67181+
67182+struct acl_subject_label {
67183+ char *filename;
67184+ ino_t inode;
67185+ dev_t device;
67186+ __u32 mode;
67187+ kernel_cap_t cap_mask;
67188+ kernel_cap_t cap_lower;
67189+ kernel_cap_t cap_invert_audit;
67190+
67191+ struct rlimit res[GR_NLIMITS];
67192+ __u32 resmask;
67193+
67194+ __u8 user_trans_type;
67195+ __u8 group_trans_type;
67196+ uid_t *user_transitions;
67197+ gid_t *group_transitions;
67198+ __u16 user_trans_num;
67199+ __u16 group_trans_num;
67200+
67201+ __u32 sock_families[2];
67202+ __u32 ip_proto[8];
67203+ __u32 ip_type;
67204+ struct acl_ip_label **ips;
67205+ __u32 ip_num;
67206+ __u32 inaddr_any_override;
67207+
67208+ __u32 crashes;
67209+ unsigned long expires;
67210+
67211+ struct acl_subject_label *parent_subject;
67212+ struct gr_hash_struct *hash;
67213+ struct acl_subject_label *prev;
67214+ struct acl_subject_label *next;
67215+
67216+ struct acl_object_label **obj_hash;
67217+ __u32 obj_hash_size;
67218+ __u16 pax_flags;
67219+};
67220+
67221+struct role_allowed_ip {
67222+ __u32 addr;
67223+ __u32 netmask;
67224+
67225+ struct role_allowed_ip *prev;
67226+ struct role_allowed_ip *next;
67227+};
67228+
67229+struct role_transition {
67230+ char *rolename;
67231+
67232+ struct role_transition *prev;
67233+ struct role_transition *next;
67234+};
67235+
67236+struct acl_role_label {
67237+ char *rolename;
67238+ uid_t uidgid;
67239+ __u16 roletype;
67240+
67241+ __u16 auth_attempts;
67242+ unsigned long expires;
67243+
67244+ struct acl_subject_label *root_label;
67245+ struct gr_hash_struct *hash;
67246+
67247+ struct acl_role_label *prev;
67248+ struct acl_role_label *next;
67249+
67250+ struct role_transition *transitions;
67251+ struct role_allowed_ip *allowed_ips;
67252+ uid_t *domain_children;
67253+ __u16 domain_child_num;
67254+
67255+ umode_t umask;
67256+
67257+ struct acl_subject_label **subj_hash;
67258+ __u32 subj_hash_size;
67259+};
67260+
67261+struct user_acl_role_db {
67262+ struct acl_role_label **r_table;
67263+ __u32 num_pointers; /* Number of allocations to track */
67264+ __u32 num_roles; /* Number of roles */
67265+ __u32 num_domain_children; /* Number of domain children */
67266+ __u32 num_subjects; /* Number of subjects */
67267+ __u32 num_objects; /* Number of objects */
67268+};
67269+
67270+struct acl_object_label {
67271+ char *filename;
67272+ ino_t inode;
67273+ dev_t device;
67274+ __u32 mode;
67275+
67276+ struct acl_subject_label *nested;
67277+ struct acl_object_label *globbed;
67278+
67279+ /* next two structures not used */
67280+
67281+ struct acl_object_label *prev;
67282+ struct acl_object_label *next;
67283+};
67284+
67285+struct acl_ip_label {
67286+ char *iface;
67287+ __u32 addr;
67288+ __u32 netmask;
67289+ __u16 low, high;
67290+ __u8 mode;
67291+ __u32 type;
67292+ __u32 proto[8];
67293+
67294+ /* next two structures not used */
67295+
67296+ struct acl_ip_label *prev;
67297+ struct acl_ip_label *next;
67298+};
67299+
67300+struct gr_arg {
67301+ struct user_acl_role_db role_db;
67302+ unsigned char pw[GR_PW_LEN];
67303+ unsigned char salt[GR_SALT_LEN];
67304+ unsigned char sum[GR_SHA_LEN];
67305+ unsigned char sp_role[GR_SPROLE_LEN];
67306+ struct sprole_pw *sprole_pws;
67307+ dev_t segv_device;
67308+ ino_t segv_inode;
67309+ uid_t segv_uid;
67310+ __u16 num_sprole_pws;
67311+ __u16 mode;
67312+};
67313+
67314+struct gr_arg_wrapper {
67315+ struct gr_arg *arg;
67316+ __u32 version;
67317+ __u32 size;
67318+};
67319+
67320+struct subject_map {
67321+ struct acl_subject_label *user;
67322+ struct acl_subject_label *kernel;
67323+ struct subject_map *prev;
67324+ struct subject_map *next;
67325+};
67326+
67327+struct acl_subj_map_db {
67328+ struct subject_map **s_hash;
67329+ __u32 s_size;
67330+};
67331+
67332+/* End Data Structures Section */
67333+
67334+/* Hash functions generated by empirical testing by Brad Spengler.
67335+ Makes good use of the low bits of the inode: generally 0-1 loop
67336+ iterations for a successful match, 0-3 for an unsuccessful one.
67337+ Shift/add algorithm with modulus of table size and an XOR. */
67338+
67339+static __inline__ unsigned int
67340+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
67341+{
67342+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
67343+}
67344+
67345+static __inline__ unsigned int
67346+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
67347+{
67348+ return ((const unsigned long)userp % sz);
67349+}
67350+
67351+static __inline__ unsigned int
67352+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
67353+{
67354+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
67355+}
67356+
67357+static __inline__ unsigned int
67358+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
67359+{
67360+ return full_name_hash((const unsigned char *)name, len) % sz;
67361+}
67362+
67363+#define FOR_EACH_ROLE_START(role) \
67364+ role = role_list; \
67365+ while (role) {
67366+
67367+#define FOR_EACH_ROLE_END(role) \
67368+ role = role->prev; \
67369+ }
67370+
67371+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
67372+ subj = NULL; \
67373+ iter = 0; \
67374+ while (iter < role->subj_hash_size) { \
67375+ if (subj == NULL) \
67376+ subj = role->subj_hash[iter]; \
67377+ if (subj == NULL) { \
67378+ iter++; \
67379+ continue; \
67380+ }
67381+
67382+#define FOR_EACH_SUBJECT_END(subj,iter) \
67383+ subj = subj->next; \
67384+ if (subj == NULL) \
67385+ iter++; \
67386+ }
67387+
67388+
67389+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
67390+ subj = role->hash->first; \
67391+ while (subj != NULL) {
67392+
67393+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
67394+ subj = subj->next; \
67395+ }
67396+
67397+#endif
67398+
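
gr_fhash() mixes the inode number with shifted copies of itself (and the device number) so that consecutive inodes, the common case on a real filesystem, spread across buckets, which is what keeps the probe counts quoted in the comment above so low. A standalone harness to watch the spread (plain unsigned types to avoid clashing with sys/types.h):

#include <stdio.h>

static unsigned int gr_fhash(unsigned long ino, unsigned int dev,
			     unsigned int sz)
{
	return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
	unsigned long ino;

	for (ino = 100; ino < 108; ino++)	/* consecutive inodes */
		printf("ino %lu -> bucket %u\n", ino, gr_fhash(ino, 8, 64));
	return 0;
}
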
67399diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
67400new file mode 100644
67401index 0000000..323ecf2
67402--- /dev/null
67403+++ b/include/linux/gralloc.h
67404@@ -0,0 +1,9 @@
67405+#ifndef __GRALLOC_H
67406+#define __GRALLOC_H
67407+
67408+void acl_free_all(void);
67409+int acl_alloc_stack_init(unsigned long size);
67410+void *acl_alloc(unsigned long len);
67411+void *acl_alloc_num(unsigned long num, unsigned long len);
67412+
67413+#endif
67414diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
67415new file mode 100644
67416index 0000000..be66033
67417--- /dev/null
67418+++ b/include/linux/grdefs.h
67419@@ -0,0 +1,140 @@
67420+#ifndef GRDEFS_H
67421+#define GRDEFS_H
67422+
67423+/* Begin grsecurity status declarations */
67424+
67425+enum {
67426+ GR_READY = 0x01,
67427+ GR_STATUS_INIT = 0x00 // disabled state
67428+};
67429+
67430+/* Begin ACL declarations */
67431+
67432+/* Role flags */
67433+
67434+enum {
67435+ GR_ROLE_USER = 0x0001,
67436+ GR_ROLE_GROUP = 0x0002,
67437+ GR_ROLE_DEFAULT = 0x0004,
67438+ GR_ROLE_SPECIAL = 0x0008,
67439+ GR_ROLE_AUTH = 0x0010,
67440+ GR_ROLE_NOPW = 0x0020,
67441+ GR_ROLE_GOD = 0x0040,
67442+ GR_ROLE_LEARN = 0x0080,
67443+ GR_ROLE_TPE = 0x0100,
67444+ GR_ROLE_DOMAIN = 0x0200,
67445+ GR_ROLE_PAM = 0x0400,
67446+ GR_ROLE_PERSIST = 0x0800
67447+};
67448+
67449+/* ACL Subject and Object mode flags */
67450+enum {
67451+ GR_DELETED = 0x80000000
67452+};
67453+
67454+/* ACL Object-only mode flags */
67455+enum {
67456+ GR_READ = 0x00000001,
67457+ GR_APPEND = 0x00000002,
67458+ GR_WRITE = 0x00000004,
67459+ GR_EXEC = 0x00000008,
67460+ GR_FIND = 0x00000010,
67461+ GR_INHERIT = 0x00000020,
67462+ GR_SETID = 0x00000040,
67463+ GR_CREATE = 0x00000080,
67464+ GR_DELETE = 0x00000100,
67465+ GR_LINK = 0x00000200,
67466+ GR_AUDIT_READ = 0x00000400,
67467+ GR_AUDIT_APPEND = 0x00000800,
67468+ GR_AUDIT_WRITE = 0x00001000,
67469+ GR_AUDIT_EXEC = 0x00002000,
67470+ GR_AUDIT_FIND = 0x00004000,
67471+ GR_AUDIT_INHERIT= 0x00008000,
67472+ GR_AUDIT_SETID = 0x00010000,
67473+ GR_AUDIT_CREATE = 0x00020000,
67474+ GR_AUDIT_DELETE = 0x00040000,
67475+ GR_AUDIT_LINK = 0x00080000,
67476+ GR_PTRACERD = 0x00100000,
67477+ GR_NOPTRACE = 0x00200000,
67478+ GR_SUPPRESS = 0x00400000,
67479+ GR_NOLEARN = 0x00800000,
67480+ GR_INIT_TRANSFER= 0x01000000
67481+};
67482+
67483+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
67484+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
67485+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
67486+
67487+/* ACL subject-only mode flags */
67488+enum {
67489+ GR_KILL = 0x00000001,
67490+ GR_VIEW = 0x00000002,
67491+ GR_PROTECTED = 0x00000004,
67492+ GR_LEARN = 0x00000008,
67493+ GR_OVERRIDE = 0x00000010,
67494+ /* just a placeholder, this mode is only used in userspace */
67495+ GR_DUMMY = 0x00000020,
67496+ GR_PROTSHM = 0x00000040,
67497+ GR_KILLPROC = 0x00000080,
67498+ GR_KILLIPPROC = 0x00000100,
67499+ /* just a placeholder, this mode is only used in userspace */
67500+ GR_NOTROJAN = 0x00000200,
67501+ GR_PROTPROCFD = 0x00000400,
67502+ GR_PROCACCT = 0x00000800,
67503+ GR_RELAXPTRACE = 0x00001000,
67504+ //GR_NESTED = 0x00002000,
67505+ GR_INHERITLEARN = 0x00004000,
67506+ GR_PROCFIND = 0x00008000,
67507+ GR_POVERRIDE = 0x00010000,
67508+ GR_KERNELAUTH = 0x00020000,
67509+ GR_ATSECURE = 0x00040000,
67510+ GR_SHMEXEC = 0x00080000
67511+};
67512+
67513+enum {
67514+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
67515+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
67516+ GR_PAX_ENABLE_MPROTECT = 0x0004,
67517+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
67518+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
67519+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
67520+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
67521+ GR_PAX_DISABLE_MPROTECT = 0x0400,
67522+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
67523+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
67524+};
67525+
67526+enum {
67527+ GR_ID_USER = 0x01,
67528+ GR_ID_GROUP = 0x02,
67529+};
67530+
67531+enum {
67532+ GR_ID_ALLOW = 0x01,
67533+ GR_ID_DENY = 0x02,
67534+};
67535+
67536+#define GR_CRASH_RES 31
67537+#define GR_UIDTABLE_MAX 500
67538+
67539+/* begin resource learning section */
67540+enum {
67541+ GR_RLIM_CPU_BUMP = 60,
67542+ GR_RLIM_FSIZE_BUMP = 50000,
67543+ GR_RLIM_DATA_BUMP = 10000,
67544+ GR_RLIM_STACK_BUMP = 1000,
67545+ GR_RLIM_CORE_BUMP = 10000,
67546+ GR_RLIM_RSS_BUMP = 500000,
67547+ GR_RLIM_NPROC_BUMP = 1,
67548+ GR_RLIM_NOFILE_BUMP = 5,
67549+ GR_RLIM_MEMLOCK_BUMP = 50000,
67550+ GR_RLIM_AS_BUMP = 500000,
67551+ GR_RLIM_LOCKS_BUMP = 2,
67552+ GR_RLIM_SIGPENDING_BUMP = 5,
67553+ GR_RLIM_MSGQUEUE_BUMP = 10000,
67554+ GR_RLIM_NICE_BUMP = 1,
67555+ GR_RLIM_RTPRIO_BUMP = 1,
67556+ GR_RLIM_RTTIME_BUMP = 1000000
67557+};
67558+
67559+#endif
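A detail worth noticing in the object-mode enum: every GR_AUDIT_* flag sits exactly ten bits above its base flag (GR_READ 0x1 against GR_AUDIT_READ 0x400, GR_LINK 0x200 against GR_AUDIT_LINK 0x80000), so a requested-access word converts to its audit counterpart with one shift; this is presumably the layout that to_gr_audit() in grinternal.h exploits. A self-contained check of the invariant, using only values from the enum above:

#include <assert.h>
#include <stdio.h>

enum {
    GR_READ        = 0x00000001,
    GR_WRITE       = 0x00000004,
    GR_LINK        = 0x00000200,
    GR_AUDIT_READ  = 0x00000400,
    GR_AUDIT_WRITE = 0x00001000,
    GR_AUDIT_LINK  = 0x00080000,
};

int main(void)
{
    assert(GR_AUDIT_READ  == GR_READ  << 10);
    assert(GR_AUDIT_WRITE == GR_WRITE << 10);
    assert(GR_AUDIT_LINK  == GR_LINK  << 10);
    /* a whole request mask shifts in one go: 0x5 << 10 == 0x1400 */
    printf("audit mask of READ|WRITE: %#x\n", (GR_READ | GR_WRITE) << 10);
    return 0;
}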
67560diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
67561new file mode 100644
67562index 0000000..9bb6662
67563--- /dev/null
67564+++ b/include/linux/grinternal.h
67565@@ -0,0 +1,215 @@
67566+#ifndef __GRINTERNAL_H
67567+#define __GRINTERNAL_H
67568+
67569+#ifdef CONFIG_GRKERNSEC
67570+
67571+#include <linux/fs.h>
67572+#include <linux/mnt_namespace.h>
67573+#include <linux/nsproxy.h>
67574+#include <linux/gracl.h>
67575+#include <linux/grdefs.h>
67576+#include <linux/grmsg.h>
67577+
67578+void gr_add_learn_entry(const char *fmt, ...)
67579+ __attribute__ ((format (printf, 1, 2)));
67580+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
67581+ const struct vfsmount *mnt);
67582+__u32 gr_check_create(const struct dentry *new_dentry,
67583+ const struct dentry *parent,
67584+ const struct vfsmount *mnt, const __u32 mode);
67585+int gr_check_protected_task(const struct task_struct *task);
67586+__u32 to_gr_audit(const __u32 reqmode);
67587+int gr_set_acls(const int type);
67588+int gr_apply_subject_to_task(struct task_struct *task);
67589+int gr_acl_is_enabled(void);
67590+char gr_roletype_to_char(void);
67591+
67592+void gr_handle_alertkill(struct task_struct *task);
67593+char *gr_to_filename(const struct dentry *dentry,
67594+ const struct vfsmount *mnt);
67595+char *gr_to_filename1(const struct dentry *dentry,
67596+ const struct vfsmount *mnt);
67597+char *gr_to_filename2(const struct dentry *dentry,
67598+ const struct vfsmount *mnt);
67599+char *gr_to_filename3(const struct dentry *dentry,
67600+ const struct vfsmount *mnt);
67601+
67602+extern int grsec_enable_ptrace_readexec;
67603+extern int grsec_enable_harden_ptrace;
67604+extern int grsec_enable_link;
67605+extern int grsec_enable_fifo;
67606+extern int grsec_enable_execve;
67607+extern int grsec_enable_shm;
67608+extern int grsec_enable_execlog;
67609+extern int grsec_enable_signal;
67610+extern int grsec_enable_audit_ptrace;
67611+extern int grsec_enable_forkfail;
67612+extern int grsec_enable_time;
67613+extern int grsec_enable_rofs;
67614+extern int grsec_enable_chroot_shmat;
67615+extern int grsec_enable_chroot_mount;
67616+extern int grsec_enable_chroot_double;
67617+extern int grsec_enable_chroot_pivot;
67618+extern int grsec_enable_chroot_chdir;
67619+extern int grsec_enable_chroot_chmod;
67620+extern int grsec_enable_chroot_mknod;
67621+extern int grsec_enable_chroot_fchdir;
67622+extern int grsec_enable_chroot_nice;
67623+extern int grsec_enable_chroot_execlog;
67624+extern int grsec_enable_chroot_caps;
67625+extern int grsec_enable_chroot_sysctl;
67626+extern int grsec_enable_chroot_unix;
67627+extern int grsec_enable_symlinkown;
67628+extern kgid_t grsec_symlinkown_gid;
67629+extern int grsec_enable_tpe;
67630+extern kgid_t grsec_tpe_gid;
67631+extern int grsec_enable_tpe_all;
67632+extern int grsec_enable_tpe_invert;
67633+extern int grsec_enable_socket_all;
67634+extern kgid_t grsec_socket_all_gid;
67635+extern int grsec_enable_socket_client;
67636+extern kgid_t grsec_socket_client_gid;
67637+extern int grsec_enable_socket_server;
67638+extern kgid_t grsec_socket_server_gid;
67639+extern kgid_t grsec_audit_gid;
67640+extern int grsec_enable_group;
67641+extern int grsec_enable_audit_textrel;
67642+extern int grsec_enable_log_rwxmaps;
67643+extern int grsec_enable_mount;
67644+extern int grsec_enable_chdir;
67645+extern int grsec_resource_logging;
67646+extern int grsec_enable_blackhole;
67647+extern int grsec_lastack_retries;
67648+extern int grsec_enable_brute;
67649+extern int grsec_lock;
67650+
67651+extern spinlock_t grsec_alert_lock;
67652+extern unsigned long grsec_alert_wtime;
67653+extern unsigned long grsec_alert_fyet;
67654+
67655+extern spinlock_t grsec_audit_lock;
67656+
67657+extern rwlock_t grsec_exec_file_lock;
67658+
67659+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
67660+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
67661+ (tsk)->exec_file->f_vfsmnt) : "/")
67662+
67663+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
67664+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
67665+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67666+
67667+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
67668+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
67669+ (tsk)->exec_file->f_vfsmnt) : "/")
67670+
67671+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
67672+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
67673+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67674+
67675+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
67676+
67677+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
67678+
67679+#define GR_CHROOT_CAPS {{ \
67680+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
67681+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
67682+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
67683+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
67684+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
67685+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
67686+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
67687+
67688+#define security_learn(normal_msg,args...) \
67689+({ \
67690+ read_lock(&grsec_exec_file_lock); \
67691+ gr_add_learn_entry(normal_msg "\n", ## args); \
67692+ read_unlock(&grsec_exec_file_lock); \
67693+})
67694+
67695+enum {
67696+ GR_DO_AUDIT,
67697+ GR_DONT_AUDIT,
67698+ /* used for non-audit messages that we shouldn't kill the task on */
67699+ GR_DONT_AUDIT_GOOD
67700+};
67701+
67702+enum {
67703+ GR_TTYSNIFF,
67704+ GR_RBAC,
67705+ GR_RBAC_STR,
67706+ GR_STR_RBAC,
67707+ GR_RBAC_MODE2,
67708+ GR_RBAC_MODE3,
67709+ GR_FILENAME,
67710+ GR_SYSCTL_HIDDEN,
67711+ GR_NOARGS,
67712+ GR_ONE_INT,
67713+ GR_ONE_INT_TWO_STR,
67714+ GR_ONE_STR,
67715+ GR_STR_INT,
67716+ GR_TWO_STR_INT,
67717+ GR_TWO_INT,
67718+ GR_TWO_U64,
67719+ GR_THREE_INT,
67720+ GR_FIVE_INT_TWO_STR,
67721+ GR_TWO_STR,
67722+ GR_THREE_STR,
67723+ GR_FOUR_STR,
67724+ GR_STR_FILENAME,
67725+ GR_FILENAME_STR,
67726+ GR_FILENAME_TWO_INT,
67727+ GR_FILENAME_TWO_INT_STR,
67728+ GR_TEXTREL,
67729+ GR_PTRACE,
67730+ GR_RESOURCE,
67731+ GR_CAP,
67732+ GR_SIG,
67733+ GR_SIG2,
67734+ GR_CRASH1,
67735+ GR_CRASH2,
67736+ GR_PSACCT,
67737+ GR_RWXMAP
67738+};
67739+
67740+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
67741+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
67742+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
67743+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
67744+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
67745+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
67746+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
67747+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
67748+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
67749+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
67750+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
67751+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
67752+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
67753+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
67754+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
67755+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
67756+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
67757+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
67758+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
67759+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
67760+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
67761+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
67762+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
67763+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
67764+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
67765+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
67766+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
67767+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
67768+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
67769+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
67770+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
67771+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
67772+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
67773+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
67774+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
67775+
67776+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
67777+
67778+#endif
67779+
67780+#endif
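All the gr_log_* wrappers above collapse into a single varargs sink, gr_log_varargs(), that switches on an argument-layout tag to know how to pull its arguments. A stripped-down userspace analogue of that dispatch; the tag names and messages here are invented for illustration:

#include <stdarg.h>
#include <stdio.h>

enum { LOG_ONE_INT, LOG_TWO_STR };      /* stand-ins for GR_ONE_INT etc. */

static void log_varargs(const char *msg, int argtypes, ...)
{
    va_list ap;

    va_start(ap, argtypes);
    switch (argtypes) {
    case LOG_ONE_INT:
        printf(msg, va_arg(ap, int));
        break;
    case LOG_TWO_STR: {
        const char *a = va_arg(ap, const char *);
        const char *b = va_arg(ap, const char *);
        printf(msg, a, b);
        break;
    }
    }
    va_end(ap);
}

#define log_int(msg, num)        log_varargs(msg, LOG_ONE_INT, num)
#define log_str_str(msg, s1, s2) log_varargs(msg, LOG_TWO_STR, s1, s2)

int main(void)
{
    log_int("failed fork with errno %d\n", 11);
    log_str_str("mount of %s to %s\n", "/dev/sda1", "/mnt");
    return 0;
}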
67781diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
67782new file mode 100644
67783index 0000000..2bd4c8d
67784--- /dev/null
67785+++ b/include/linux/grmsg.h
67786@@ -0,0 +1,111 @@
67787+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
67788+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
67789+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
67790+#define GR_STOPMOD_MSG "denied modification of module state by "
67791+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
67792+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
67793+#define GR_IOPERM_MSG "denied use of ioperm() by "
67794+#define GR_IOPL_MSG "denied use of iopl() by "
67795+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
67796+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
67797+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
67798+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
67799+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
67800+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
67801+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
67802+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
67803+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
67804+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
67805+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
67806+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
67807+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
67808+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
67809+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
67810+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
67811+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
67812+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
67813+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
67814+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
67815+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
67816+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
67817+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
67818+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
67819+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
67820+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
67821+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
67822+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
67823+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
67824+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
67825+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
67826+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
67827+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
67828+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
67829+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
67830+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
67831+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
67832+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
67833+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
67834+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
67835+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
67836+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
67837+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
67838+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
67839+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
67840+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
67841+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
67842+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
67843+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
67844+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
67845+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
67846+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
67847+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
67848+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
67849+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
67850+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
67851+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
67852+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
67853+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
67854+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
67855+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
67856+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
67857+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
67858+#define GR_FAILFORK_MSG "failed fork with errno %s by "
67859+#define GR_NICE_CHROOT_MSG "denied priority change by "
67860+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
67861+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
67862+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
67863+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
67864+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
67865+#define GR_TIME_MSG "time set by "
67866+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
67867+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
67868+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
67869+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
67870+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
67871+#define GR_BIND_MSG "denied bind() by "
67872+#define GR_CONNECT_MSG "denied connect() by "
67873+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
67874+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
67875+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
67876+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
67877+#define GR_CAP_ACL_MSG "use of %s denied for "
67878+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
67879+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
67880+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
67881+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
67882+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
67883+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
67884+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
67885+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
67886+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
67887+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
67888+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
67889+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
67890+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
67891+#define GR_VM86_MSG "denied use of vm86 by "
67892+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
67893+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
67894+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
67895+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
67896+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
67897+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
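Messages such as GR_DUALSIGLOG_MSG and GR_SEGVSTART_ACL_MSG splice DEFAULTSECMSG in through C's adjacent-string-literal concatenation, so the process-identity format is maintained in exactly one place. The mechanism in isolation, using a shortened stand-in for the real format:

#include <stdio.h>

#define SECMSG "%.16s[%d] uid:%u"       /* stand-in, not the full format */
#define DUALSIGLOG_MSG "signal %d sent to " SECMSG " by "

int main(void)
{
    /* DUALSIGLOG_MSG expands to "signal %d sent to %.16s[%d] uid:%u by " */
    printf(DUALSIGLOG_MSG, 9, "sshd", 1234, 0u);
    puts("root");
    return 0;
}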
67898diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
67899new file mode 100644
67900index 0000000..1ae241a
67901--- /dev/null
67902+++ b/include/linux/grsecurity.h
67903@@ -0,0 +1,257 @@
67904+#ifndef GR_SECURITY_H
67905+#define GR_SECURITY_H
67906+#include <linux/fs.h>
67907+#include <linux/fs_struct.h>
67908+#include <linux/binfmts.h>
67909+#include <linux/gracl.h>
67910+
67911+/* notify of brain-dead configs */
67912+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67913+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
67914+#endif
67915+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
67916+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
67917+#endif
67918+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
67919+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
67920+#endif
67921+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
67922+#error "CONFIG_PAX enabled, but no PaX options are enabled."
67923+#endif
67924+
67925+#include <linux/compat.h>
67926+
67927+struct user_arg_ptr {
67928+#ifdef CONFIG_COMPAT
67929+ bool is_compat;
67930+#endif
67931+ union {
67932+ const char __user *const __user *native;
67933+#ifdef CONFIG_COMPAT
67934+ const compat_uptr_t __user *compat;
67935+#endif
67936+ } ptr;
67937+};
67938+
67939+void gr_handle_brute_attach(unsigned long mm_flags);
67940+void gr_handle_brute_check(void);
67941+void gr_handle_kernel_exploit(void);
67942+int gr_process_user_ban(void);
67943+
67944+char gr_roletype_to_char(void);
67945+
67946+int gr_acl_enable_at_secure(void);
67947+
67948+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
67949+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
67950+
67951+void gr_del_task_from_ip_table(struct task_struct *p);
67952+
67953+int gr_pid_is_chrooted(struct task_struct *p);
67954+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
67955+int gr_handle_chroot_nice(void);
67956+int gr_handle_chroot_sysctl(const int op);
67957+int gr_handle_chroot_setpriority(struct task_struct *p,
67958+ const int niceval);
67959+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
67960+int gr_handle_chroot_chroot(const struct dentry *dentry,
67961+ const struct vfsmount *mnt);
67962+void gr_handle_chroot_chdir(struct path *path);
67963+int gr_handle_chroot_chmod(const struct dentry *dentry,
67964+ const struct vfsmount *mnt, const int mode);
67965+int gr_handle_chroot_mknod(const struct dentry *dentry,
67966+ const struct vfsmount *mnt, const int mode);
67967+int gr_handle_chroot_mount(const struct dentry *dentry,
67968+ const struct vfsmount *mnt,
67969+ const char *dev_name);
67970+int gr_handle_chroot_pivot(void);
67971+int gr_handle_chroot_unix(const pid_t pid);
67972+
67973+int gr_handle_rawio(const struct inode *inode);
67974+
67975+void gr_handle_ioperm(void);
67976+void gr_handle_iopl(void);
67977+
67978+umode_t gr_acl_umask(void);
67979+
67980+int gr_tpe_allow(const struct file *file);
67981+
67982+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
67983+void gr_clear_chroot_entries(struct task_struct *task);
67984+
67985+void gr_log_forkfail(const int retval);
67986+void gr_log_timechange(void);
67987+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
67988+void gr_log_chdir(const struct dentry *dentry,
67989+ const struct vfsmount *mnt);
67990+void gr_log_chroot_exec(const struct dentry *dentry,
67991+ const struct vfsmount *mnt);
67992+void gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv);
67993+void gr_log_remount(const char *devname, const int retval);
67994+void gr_log_unmount(const char *devname, const int retval);
67995+void gr_log_mount(const char *from, const char *to, const int retval);
67996+void gr_log_textrel(struct vm_area_struct *vma);
67997+void gr_log_rwxmmap(struct file *file);
67998+void gr_log_rwxmprotect(struct file *file);
67999+
68000+int gr_handle_follow_link(const struct inode *parent,
68001+ const struct inode *inode,
68002+ const struct dentry *dentry,
68003+ const struct vfsmount *mnt);
68004+int gr_handle_fifo(const struct dentry *dentry,
68005+ const struct vfsmount *mnt,
68006+ const struct dentry *dir, const int flag,
68007+ const int acc_mode);
68008+int gr_handle_hardlink(const struct dentry *dentry,
68009+ const struct vfsmount *mnt,
68010+ struct inode *inode,
68011+ const int mode, const struct filename *to);
68012+
68013+int gr_is_capable(const int cap);
68014+int gr_is_capable_nolog(const int cap);
68015+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
68016+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
68017+
68018+void gr_copy_label(struct task_struct *tsk);
68019+void gr_handle_crash(struct task_struct *task, const int sig);
68020+int gr_handle_signal(const struct task_struct *p, const int sig);
68021+int gr_check_crash_uid(const kuid_t uid);
68022+int gr_check_protected_task(const struct task_struct *task);
68023+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
68024+int gr_acl_handle_mmap(const struct file *file,
68025+ const unsigned long prot);
68026+int gr_acl_handle_mprotect(const struct file *file,
68027+ const unsigned long prot);
68028+int gr_check_hidden_task(const struct task_struct *tsk);
68029+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
68030+ const struct vfsmount *mnt);
68031+__u32 gr_acl_handle_utime(const struct dentry *dentry,
68032+ const struct vfsmount *mnt);
68033+__u32 gr_acl_handle_access(const struct dentry *dentry,
68034+ const struct vfsmount *mnt, const int fmode);
68035+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
68036+ const struct vfsmount *mnt, umode_t *mode);
68037+__u32 gr_acl_handle_chown(const struct dentry *dentry,
68038+ const struct vfsmount *mnt);
68039+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
68040+ const struct vfsmount *mnt);
68041+int gr_handle_ptrace(struct task_struct *task, const long request);
68042+int gr_handle_proc_ptrace(struct task_struct *task);
68043+__u32 gr_acl_handle_execve(const struct dentry *dentry,
68044+ const struct vfsmount *mnt);
68045+int gr_check_crash_exec(const struct file *filp);
68046+int gr_acl_is_enabled(void);
68047+void gr_set_kernel_label(struct task_struct *task);
68048+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
68049+ const kgid_t gid);
68050+int gr_set_proc_label(const struct dentry *dentry,
68051+ const struct vfsmount *mnt,
68052+ const int unsafe_flags);
68053+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
68054+ const struct vfsmount *mnt);
68055+__u32 gr_acl_handle_open(const struct dentry *dentry,
68056+ const struct vfsmount *mnt, int acc_mode);
68057+__u32 gr_acl_handle_creat(const struct dentry *dentry,
68058+ const struct dentry *p_dentry,
68059+ const struct vfsmount *p_mnt,
68060+ int open_flags, int acc_mode, const int imode);
68061+void gr_handle_create(const struct dentry *dentry,
68062+ const struct vfsmount *mnt);
68063+void gr_handle_proc_create(const struct dentry *dentry,
68064+ const struct inode *inode);
68065+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
68066+ const struct dentry *parent_dentry,
68067+ const struct vfsmount *parent_mnt,
68068+ const int mode);
68069+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
68070+ const struct dentry *parent_dentry,
68071+ const struct vfsmount *parent_mnt);
68072+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
68073+ const struct vfsmount *mnt);
68074+void gr_handle_delete(const ino_t ino, const dev_t dev);
68075+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
68076+ const struct vfsmount *mnt);
68077+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
68078+ const struct dentry *parent_dentry,
68079+ const struct vfsmount *parent_mnt,
68080+ const struct filename *from);
68081+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
68082+ const struct dentry *parent_dentry,
68083+ const struct vfsmount *parent_mnt,
68084+ const struct dentry *old_dentry,
68085+ const struct vfsmount *old_mnt, const struct filename *to);
68086+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
68087+int gr_acl_handle_rename(struct dentry *new_dentry,
68088+ struct dentry *parent_dentry,
68089+ const struct vfsmount *parent_mnt,
68090+ struct dentry *old_dentry,
68091+ struct inode *old_parent_inode,
68092+ struct vfsmount *old_mnt, const struct filename *newname);
68093+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
68094+ struct dentry *old_dentry,
68095+ struct dentry *new_dentry,
68096+ struct vfsmount *mnt, const __u8 replace);
68097+__u32 gr_check_link(const struct dentry *new_dentry,
68098+ const struct dentry *parent_dentry,
68099+ const struct vfsmount *parent_mnt,
68100+ const struct dentry *old_dentry,
68101+ const struct vfsmount *old_mnt);
68102+int gr_acl_handle_filldir(const struct file *file, const char *name,
68103+ const unsigned int namelen, const ino_t ino);
68104+
68105+__u32 gr_acl_handle_unix(const struct dentry *dentry,
68106+ const struct vfsmount *mnt);
68107+void gr_acl_handle_exit(void);
68108+void gr_acl_handle_psacct(struct task_struct *task, const long code);
68109+int gr_acl_handle_procpidmem(const struct task_struct *task);
68110+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
68111+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
68112+void gr_audit_ptrace(struct task_struct *task);
68113+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
68114+void gr_put_exec_file(struct task_struct *task);
68115+
68116+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
68117+
68118+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
68119+extern void gr_learn_resource(const struct task_struct *task, const int res,
68120+ const unsigned long wanted, const int gt);
68121+#else
68122+static inline void gr_learn_resource(const struct task_struct *task, const int res,
68123+ const unsigned long wanted, const int gt)
68124+{
68125+}
68126+#endif
68127+
68128+#ifdef CONFIG_GRKERNSEC_RESLOG
68129+extern void gr_log_resource(const struct task_struct *task, const int res,
68130+ const unsigned long wanted, const int gt);
68131+#else
68132+static inline void gr_log_resource(const struct task_struct *task, const int res,
68133+ const unsigned long wanted, const int gt)
68134+{
68135+}
68136+#endif
68137+
68138+#ifdef CONFIG_GRKERNSEC
68139+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
68140+void gr_handle_vm86(void);
68141+void gr_handle_mem_readwrite(u64 from, u64 to);
68142+
68143+void gr_log_badprocpid(const char *entry);
68144+
68145+extern int grsec_enable_dmesg;
68146+extern int grsec_disable_privio;
68147+
68148+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
68149+extern kgid_t grsec_proc_gid;
68150+#endif
68151+
68152+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
68153+extern int grsec_enable_chroot_findtask;
68154+#endif
68155+#ifdef CONFIG_GRKERNSEC_SETXID
68156+extern int grsec_enable_setxid;
68157+#endif
68158+#endif
68159+
68160+#endif
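The gr_learn_resource()/gr_log_resource() pairs at the end show the header's compile-out idiom: when the relevant option is off, the extern declaration gives way to an empty static inline, so call sites stay unconditional and compile to nothing. The same pattern reduced to a standalone example (names are illustrative):

#include <stdio.h>

/* #define FEATURE_LOGGING 1      flip on to use a real external hook */
#ifdef FEATURE_LOGGING
extern void log_resource(int res, unsigned long wanted);
#else
static inline void log_resource(int res, unsigned long wanted)
{
    (void)res;                  /* empty stub: the optimizer deletes the call */
    (void)wanted;
}
#endif

int main(void)
{
    log_resource(0, 4096);      /* unconditional call site, free when off */
    puts("done");
    return 0;
}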
68161diff --git a/include/linux/grsock.h b/include/linux/grsock.h
68162new file mode 100644
68163index 0000000..e7ffaaf
68164--- /dev/null
68165+++ b/include/linux/grsock.h
68166@@ -0,0 +1,19 @@
68167+#ifndef __GRSOCK_H
68168+#define __GRSOCK_H
68169+
68170+extern void gr_attach_curr_ip(const struct sock *sk);
68171+extern int gr_handle_sock_all(const int family, const int type,
68172+ const int protocol);
68173+extern int gr_handle_sock_server(const struct sockaddr *sck);
68174+extern int gr_handle_sock_server_other(const struct sock *sck);
68175+extern int gr_handle_sock_client(const struct sockaddr *sck);
68176+extern int gr_search_connect(struct socket * sock,
68177+ struct sockaddr_in * addr);
68178+extern int gr_search_bind(struct socket * sock,
68179+ struct sockaddr_in * addr);
68180+extern int gr_search_listen(struct socket * sock);
68181+extern int gr_search_accept(struct socket * sock);
68182+extern int gr_search_socket(const int domain, const int type,
68183+ const int protocol);
68184+
68185+#endif
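These hooks sit in front of the socket syscalls, and a call site checks the verdict before continuing down the normal path. A hypothetical userspace sketch of such a gate; the verdict polarity and the stand-in policy are assumptions, not taken from the patch:

#include <stdio.h>

static int search_socket(int domain, int type, int protocol)
{
    return type != 3;           /* toy policy: deny raw sockets (SOCK_RAW) */
}

static int checked_socket(int domain, int type, int protocol)
{
    if (!search_socket(domain, type, protocol)) {
        fprintf(stderr, "denied socket(%d,%d,%d)\n", domain, type, protocol);
        return -1;
    }
    printf("socket(%d,%d,%d) allowed\n", domain, type, protocol);
    return 0;
}

int main(void)
{
    checked_socket(2, 1, 0);    /* AF_INET, SOCK_STREAM: allowed */
    checked_socket(2, 3, 1);    /* AF_INET, SOCK_RAW: denied */
    return 0;
}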
68186diff --git a/include/linux/highmem.h b/include/linux/highmem.h
68187index ef788b5..ac41b7b 100644
68188--- a/include/linux/highmem.h
68189+++ b/include/linux/highmem.h
68190@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
68191 kunmap_atomic(kaddr);
68192 }
68193
68194+static inline void sanitize_highpage(struct page *page)
68195+{
68196+ void *kaddr;
68197+ unsigned long flags;
68198+
68199+ local_irq_save(flags);
68200+ kaddr = kmap_atomic(page);
68201+ clear_page(kaddr);
68202+ kunmap_atomic(kaddr);
68203+ local_irq_restore(flags);
68204+}
68205+
68206 static inline void zero_user_segments(struct page *page,
68207 unsigned start1, unsigned end1,
68208 unsigned start2, unsigned end2)
68209diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
68210index 1c7b89a..7f52502 100644
68211--- a/include/linux/hwmon-sysfs.h
68212+++ b/include/linux/hwmon-sysfs.h
68213@@ -25,7 +25,8 @@
68214 struct sensor_device_attribute{
68215 struct device_attribute dev_attr;
68216 int index;
68217-};
68218+} __do_const;
68219+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
68220 #define to_sensor_dev_attr(_dev_attr) \
68221 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
68222
68223@@ -41,7 +42,7 @@ struct sensor_device_attribute_2 {
68224 struct device_attribute dev_attr;
68225 u8 index;
68226 u8 nr;
68227-};
68228+} __do_const;
68229 #define to_sensor_dev_attr_2(_dev_attr) \
68230 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
68231
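__do_const, applied here and to many ops structures below (pppox_proto, iommu_ops, irq_chip, key_type, kgdb_arch, kgdb_io, kobj_type, ata_port_operations), is a PaX constify-plugin annotation that forces function-pointer tables into read-only memory; the matching __no_const typedefs opt out the few instances that genuinely need runtime writes. Minus the plugin, the effect is ordinary const placement:

#include <stdio.h>

struct ops {
    int (*ioctl)(unsigned int cmd);
};

static int my_ioctl(unsigned int cmd)
{
    return (int)cmd;
}

/* const puts the table in .rodata; without it the pointers sit in
 * writable .data, a classic target for exploit overwrites */
static const struct ops my_ops = { .ioctl = my_ioctl };

int main(void)
{
    printf("%d\n", my_ops.ioctl(42));
    /* my_ops.ioctl = NULL;    rejected at compile time */
    return 0;
}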
68232diff --git a/include/linux/i2c.h b/include/linux/i2c.h
68233index d0c4db7..61b3577 100644
68234--- a/include/linux/i2c.h
68235+++ b/include/linux/i2c.h
68236@@ -369,6 +369,7 @@ struct i2c_algorithm {
68237 /* To determine what the adapter supports */
68238 u32 (*functionality) (struct i2c_adapter *);
68239 };
68240+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
68241
68242 /*
68243 * i2c_adapter is the structure used to identify a physical i2c bus along
68244diff --git a/include/linux/i2o.h b/include/linux/i2o.h
68245index d23c3c2..eb63c81 100644
68246--- a/include/linux/i2o.h
68247+++ b/include/linux/i2o.h
68248@@ -565,7 +565,7 @@ struct i2o_controller {
68249 struct i2o_device *exec; /* Executive */
68250 #if BITS_PER_LONG == 64
68251 spinlock_t context_list_lock; /* lock for context_list */
68252- atomic_t context_list_counter; /* needed for unique contexts */
68253+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
68254 struct list_head context_list; /* list of context id's
68255 and pointers */
68256 #endif
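atomic_unchecked_t marks counters that may legitimately wrap and are therefore exempted from PaX's overflow detection; the i2o context counter only needs to produce distinct ids, not saturating reference-count semantics. A plain-C illustration of a wrapping id generator (the kernel type is atomic, this demo is not):

#include <stdio.h>

static unsigned int context_counter;

static unsigned int next_context_id(void)
{
    return ++context_counter;   /* unsigned wraparound is well-defined */
}

int main(void)
{
    unsigned int a, b, c;

    context_counter = 0xfffffffeu;
    a = next_context_id();      /* 4294967295 */
    b = next_context_id();      /* 0: wrapped, still usable as an id */
    c = next_context_id();      /* 1 */
    printf("%u %u %u\n", a, b, c);
    return 0;
}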
68257diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
68258index aff7ad8..3942bbd 100644
68259--- a/include/linux/if_pppox.h
68260+++ b/include/linux/if_pppox.h
68261@@ -76,7 +76,7 @@ struct pppox_proto {
68262 int (*ioctl)(struct socket *sock, unsigned int cmd,
68263 unsigned long arg);
68264 struct module *owner;
68265-};
68266+} __do_const;
68267
68268 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
68269 extern void unregister_pppox_proto(int proto_num);
68270diff --git a/include/linux/init.h b/include/linux/init.h
68271index 10ed4f4..8e8490d 100644
68272--- a/include/linux/init.h
68273+++ b/include/linux/init.h
68274@@ -39,9 +39,36 @@
68275 * Also note, that this data cannot be "const".
68276 */
68277
68278+#ifdef MODULE
68279+#define add_init_latent_entropy
68280+#define add_devinit_latent_entropy
68281+#define add_cpuinit_latent_entropy
68282+#define add_meminit_latent_entropy
68283+#else
68284+#define add_init_latent_entropy __latent_entropy
68285+
68286+#ifdef CONFIG_HOTPLUG
68287+#define add_devinit_latent_entropy
68288+#else
68289+#define add_devinit_latent_entropy __latent_entropy
68290+#endif
68291+
68292+#ifdef CONFIG_HOTPLUG_CPU
68293+#define add_cpuinit_latent_entropy
68294+#else
68295+#define add_cpuinit_latent_entropy __latent_entropy
68296+#endif
68297+
68298+#ifdef CONFIG_MEMORY_HOTPLUG
68299+#define add_meminit_latent_entropy
68300+#else
68301+#define add_meminit_latent_entropy __latent_entropy
68302+#endif
68303+#endif
68304+
68305 /* These are for everybody (although not all archs will actually
68306 discard it in modules) */
68307-#define __init __section(.init.text) __cold notrace
68308+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
68309 #define __initdata __section(.init.data)
68310 #define __initconst __constsection(.init.rodata)
68311 #define __exitdata __section(.exit.data)
68312@@ -94,7 +121,7 @@
68313 #define __exit __section(.exit.text) __exitused __cold notrace
68314
68315 /* Used for HOTPLUG_CPU */
68316-#define __cpuinit __section(.cpuinit.text) __cold notrace
68317+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
68318 #define __cpuinitdata __section(.cpuinit.data)
68319 #define __cpuinitconst __constsection(.cpuinit.rodata)
68320 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
68321@@ -102,7 +129,7 @@
68322 #define __cpuexitconst __constsection(.cpuexit.rodata)
68323
68324 /* Used for MEMORY_HOTPLUG */
68325-#define __meminit __section(.meminit.text) __cold notrace
68326+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
68327 #define __meminitdata __section(.meminit.data)
68328 #define __meminitconst __constsection(.meminit.rodata)
68329 #define __memexit __section(.memexit.text) __exitused __cold notrace
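The latent-entropy hunk picks, per configuration option, between an empty macro and the __latent_entropy attribute, then splices the choice onto __init, __cpuinit and __meminit so that code discarded after boot can feed the entropy pool. The same select-an-attribute technique, shown runnable with a benign stand-in attribute:

#include <stdio.h>

/* #define HOTPLUG_CPU 1        when set, cpu-bringup code stays resident */
#ifdef HOTPLUG_CPU
#define add_cpuinit_attr
#else
#define add_cpuinit_attr __attribute__((cold))  /* stand-in for __latent_entropy */
#endif

static void add_cpuinit_attr bring_up_cpu(void)
{
    puts("cpu up");
}

int main(void)
{
    bring_up_cpu();
    return 0;
}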
68330diff --git a/include/linux/init_task.h b/include/linux/init_task.h
68331index 6d087c5..401cab8 100644
68332--- a/include/linux/init_task.h
68333+++ b/include/linux/init_task.h
68334@@ -143,6 +143,12 @@ extern struct task_group root_task_group;
68335
68336 #define INIT_TASK_COMM "swapper"
68337
68338+#ifdef CONFIG_X86
68339+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
68340+#else
68341+#define INIT_TASK_THREAD_INFO
68342+#endif
68343+
68344 /*
68345 * INIT_TASK is used to set up the first task table, touch at
68346 * your own risk!. Base=0, limit=0x1fffff (=2MB)
68347@@ -182,6 +188,7 @@ extern struct task_group root_task_group;
68348 RCU_POINTER_INITIALIZER(cred, &init_cred), \
68349 .comm = INIT_TASK_COMM, \
68350 .thread = INIT_THREAD, \
68351+ INIT_TASK_THREAD_INFO \
68352 .fs = &init_fs, \
68353 .files = &init_files, \
68354 .signal = &init_signals, \
68355diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
68356index 5fa5afe..ac55b25 100644
68357--- a/include/linux/interrupt.h
68358+++ b/include/linux/interrupt.h
68359@@ -430,7 +430,7 @@ enum
68360 /* map softirq index to softirq name. update 'softirq_to_name' in
68361 * kernel/softirq.c when adding a new softirq.
68362 */
68363-extern char *softirq_to_name[NR_SOFTIRQS];
68364+extern const char * const softirq_to_name[NR_SOFTIRQS];
68365
68366 /* softirq mask and active fields moved to irq_cpustat_t in
68367 * asm/hardirq.h to get better cache usage. KAO
68368@@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
68369
68370 struct softirq_action
68371 {
68372- void (*action)(struct softirq_action *);
68373-};
68374+ void (*action)(void);
68375+} __no_const;
68376
68377 asmlinkage void do_softirq(void);
68378 asmlinkage void __do_softirq(void);
68379-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
68380+extern void open_softirq(int nr, void (*action)(void));
68381 extern void softirq_init(void);
68382 extern void __raise_softirq_irqoff(unsigned int nr);
68383
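Dropping the unused struct softirq_action * argument leaves handlers as plain void (*)(void); the __no_const annotation then keeps the softirq vector writable for boot-time registration while related tables elsewhere in the patch are constified. A minimal analogue of the changed registration API:

#include <stdio.h>

struct softirq_action {
    void (*action)(void);       /* the new, self-pointer-free signature */
};

#define NR_SOFTIRQS 2
static struct softirq_action softirq_vec[NR_SOFTIRQS];

static void open_softirq(int nr, void (*action)(void))
{
    softirq_vec[nr].action = action;
}

static void tasklet_action(void)
{
    puts("tasklet softirq");
}

int main(void)
{
    open_softirq(0, tasklet_action);
    softirq_vec[0].action();    /* dispatch no longer passes the action */
    return 0;
}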
68384diff --git a/include/linux/iommu.h b/include/linux/iommu.h
68385index f3b99e1..9b73cee 100644
68386--- a/include/linux/iommu.h
68387+++ b/include/linux/iommu.h
68388@@ -101,7 +101,7 @@ struct iommu_ops {
68389 int (*domain_set_attr)(struct iommu_domain *domain,
68390 enum iommu_attr attr, void *data);
68391 unsigned long pgsize_bitmap;
68392-};
68393+} __do_const;
68394
68395 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
68396 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
68397diff --git a/include/linux/irq.h b/include/linux/irq.h
68398index fdf2c4a..5332486 100644
68399--- a/include/linux/irq.h
68400+++ b/include/linux/irq.h
68401@@ -328,7 +328,8 @@ struct irq_chip {
68402 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
68403
68404 unsigned long flags;
68405-};
68406+} __do_const;
68407+typedef struct irq_chip __no_const irq_chip_no_const;
68408
68409 /*
68410 * irq_chip specific flags
68411diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
68412index 6883e19..06992b1 100644
68413--- a/include/linux/kallsyms.h
68414+++ b/include/linux/kallsyms.h
68415@@ -15,7 +15,8 @@
68416
68417 struct module;
68418
68419-#ifdef CONFIG_KALLSYMS
68420+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
68421+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
68422 /* Lookup the address for a symbol. Returns 0 if not found. */
68423 unsigned long kallsyms_lookup_name(const char *name);
68424
68425@@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
68426 /* Stupid that this does nothing, but I didn't create this mess. */
68427 #define __print_symbol(fmt, addr)
68428 #endif /*CONFIG_KALLSYMS*/
68429+#else /* when included by kallsyms.c, vsnprintf.c, or
68430+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
68431+extern void __print_symbol(const char *fmt, unsigned long address);
68432+extern int sprint_backtrace(char *buffer, unsigned long address);
68433+extern int sprint_symbol(char *buffer, unsigned long address);
68434+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
68435+const char *kallsyms_lookup(unsigned long addr,
68436+ unsigned long *symbolsize,
68437+ unsigned long *offset,
68438+ char **modname, char *namebuf);
68439+#endif
68440
68441 /* This macro allows us to keep printk typechecking */
68442 static __printf(1, 2)
68443diff --git a/include/linux/key-type.h b/include/linux/key-type.h
68444index 518a53a..5e28358 100644
68445--- a/include/linux/key-type.h
68446+++ b/include/linux/key-type.h
68447@@ -125,7 +125,7 @@ struct key_type {
68448 /* internal fields */
68449 struct list_head link; /* link in types list */
68450 struct lock_class_key lock_class; /* key->sem lock class */
68451-};
68452+} __do_const;
68453
68454 extern struct key_type key_type_keyring;
68455
68456diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
68457index 4dff0c6..1ca9b72 100644
68458--- a/include/linux/kgdb.h
68459+++ b/include/linux/kgdb.h
68460@@ -53,7 +53,7 @@ extern int kgdb_connected;
68461 extern int kgdb_io_module_registered;
68462
68463 extern atomic_t kgdb_setting_breakpoint;
68464-extern atomic_t kgdb_cpu_doing_single_step;
68465+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
68466
68467 extern struct task_struct *kgdb_usethread;
68468 extern struct task_struct *kgdb_contthread;
68469@@ -255,7 +255,7 @@ struct kgdb_arch {
68470 void (*correct_hw_break)(void);
68471
68472 void (*enable_nmi)(bool on);
68473-};
68474+} __do_const;
68475
68476 /**
68477 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
68478@@ -280,7 +280,7 @@ struct kgdb_io {
68479 void (*pre_exception) (void);
68480 void (*post_exception) (void);
68481 int is_console;
68482-};
68483+} __do_const;
68484
68485 extern struct kgdb_arch arch_kgdb_ops;
68486
68487diff --git a/include/linux/kmod.h b/include/linux/kmod.h
68488index 5398d58..5883a34 100644
68489--- a/include/linux/kmod.h
68490+++ b/include/linux/kmod.h
68491@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
68492 * usually useless though. */
68493 extern __printf(2, 3)
68494 int __request_module(bool wait, const char *name, ...);
68495+extern __printf(3, 4)
68496+int ___request_module(bool wait, char *param_name, const char *name, ...);
68497 #define request_module(mod...) __request_module(true, mod)
68498 #define request_module_nowait(mod...) __request_module(false, mod)
68499 #define try_then_request_module(x, mod...) \
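___request_module() threads the controlling parameter's name through as the second argument, so the format attribute shifts from __printf(2, 3) to __printf(3, 4). A demonstration of the shifted attribute; the function body below is invented, only the attribute placement mirrors the declaration:

#include <stdarg.h>
#include <stdio.h>

__attribute__((format(printf, 3, 4)))
static int request_module_checked(int wait, const char *param_name,
                                  const char *fmt, ...)
{
    va_list ap;
    int n;

    va_start(ap, fmt);
    printf("[param %s] ", param_name);
    n = vprintf(fmt, ap);       /* fmt is type-checked against the args */
    va_end(ap);
    return n;
}

int main(void)
{
    request_module_checked(1, "grsec_modharden", "%s-dev-%d\n", "net", 3);
    return 0;
}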
68500diff --git a/include/linux/kobject.h b/include/linux/kobject.h
68501index 939b112..ed6ed51 100644
68502--- a/include/linux/kobject.h
68503+++ b/include/linux/kobject.h
68504@@ -111,7 +111,7 @@ struct kobj_type {
68505 struct attribute **default_attrs;
68506 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
68507 const void *(*namespace)(struct kobject *kobj);
68508-};
68509+} __do_const;
68510
68511 struct kobj_uevent_env {
68512 char *envp[UEVENT_NUM_ENVP];
68513@@ -134,6 +134,7 @@ struct kobj_attribute {
68514 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
68515 const char *buf, size_t count);
68516 };
68517+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
68518
68519 extern const struct sysfs_ops kobj_sysfs_ops;
68520
68521diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
68522index f66b065..c2c29b4 100644
68523--- a/include/linux/kobject_ns.h
68524+++ b/include/linux/kobject_ns.h
68525@@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
68526 const void *(*netlink_ns)(struct sock *sk);
68527 const void *(*initial_ns)(void);
68528 void (*drop_ns)(void *);
68529-};
68530+} __do_const;
68531
68532 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
68533 int kobj_ns_type_registered(enum kobj_ns_type type);
68534diff --git a/include/linux/kref.h b/include/linux/kref.h
68535index 4972e6e..de4d19b 100644
68536--- a/include/linux/kref.h
68537+++ b/include/linux/kref.h
68538@@ -64,7 +64,7 @@ static inline void kref_get(struct kref *kref)
68539 static inline int kref_sub(struct kref *kref, unsigned int count,
68540 void (*release)(struct kref *kref))
68541 {
68542- WARN_ON(release == NULL);
68543+ BUG_ON(release == NULL);
68544
68545 if (atomic_sub_and_test((int) count, &kref->refcount)) {
68546 release(kref);
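The kref change turns a missing release callback from a warning into a hard stop: the old kref_sub() would WARN_ON(release == NULL) and then call the null pointer anyway on the final put. A userspace model of the drop logic (non-atomic, for illustration only):

#include <assert.h>
#include <stdio.h>

struct kref {
    int refcount;
};

static int kref_sub(struct kref *kref, int count,
                    void (*release)(struct kref *))
{
    assert(release != NULL);    /* models the patched BUG_ON() */
    kref->refcount -= count;
    if (kref->refcount == 0) {
        release(kref);
        return 1;
    }
    return 0;
}

static void obj_release(struct kref *k)
{
    (void)k;
    puts("released");
}

int main(void)
{
    struct kref k = { 2 };

    kref_sub(&k, 1, obj_release);   /* still referenced */
    kref_sub(&k, 1, obj_release);   /* final put: release fires */
    return 0;
}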
68547diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
68548index 2c497ab..afe32f5 100644
68549--- a/include/linux/kvm_host.h
68550+++ b/include/linux/kvm_host.h
68551@@ -418,7 +418,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
68552 int __must_check vcpu_load(struct kvm_vcpu *vcpu);
68553 void vcpu_put(struct kvm_vcpu *vcpu);
68554
68555-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
68556+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
68557 struct module *module);
68558 void kvm_exit(void);
68559
68560@@ -574,7 +574,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
68561 struct kvm_guest_debug *dbg);
68562 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
68563
68564-int kvm_arch_init(void *opaque);
68565+int kvm_arch_init(const void *opaque);
68566 void kvm_arch_exit(void);
68567
68568 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
68569diff --git a/include/linux/libata.h b/include/linux/libata.h
68570index 649e5f8..ead5194 100644
68571--- a/include/linux/libata.h
68572+++ b/include/linux/libata.h
68573@@ -915,7 +915,7 @@ struct ata_port_operations {
68574 * fields must be pointers.
68575 */
68576 const struct ata_port_operations *inherits;
68577-};
68578+} __do_const;
68579
68580 struct ata_port_info {
68581 unsigned long flags;
68582diff --git a/include/linux/list.h b/include/linux/list.h
68583index cc6d2aa..c10ee83 100644
68584--- a/include/linux/list.h
68585+++ b/include/linux/list.h
68586@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
68587 extern void list_del(struct list_head *entry);
68588 #endif
68589
68590+extern void __pax_list_add(struct list_head *new,
68591+ struct list_head *prev,
68592+ struct list_head *next);
68593+static inline void pax_list_add(struct list_head *new, struct list_head *head)
68594+{
68595+ __pax_list_add(new, head, head->next);
68596+}
68597+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
68598+{
68599+ __pax_list_add(new, head->prev, head);
68600+}
68601+extern void pax_list_del(struct list_head *entry);
68602+
68603 /**
68604 * list_replace - replace old entry by new one
68605 * @old : the element to be replaced
68606@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
68607 INIT_LIST_HEAD(entry);
68608 }
68609
68610+extern void pax_list_del_init(struct list_head *entry);
68611+
68612 /**
68613 * list_move - delete from one list and add as another's head
68614 * @list: the entry to move
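pax_list_add() and pax_list_add_tail() reuse the classic head/tail trick: both funnel into a single three-argument splice between a prev and a next node, with __pax_list_add() presumably wrapping the pointer stores in a kernel write window. The splice itself, demonstrated standalone:

#include <stdio.h>

struct list_head {
    struct list_head *next, *prev;
};

static void __list_add(struct list_head *new,
                       struct list_head *prev, struct list_head *next)
{
    next->prev = new;           /* four stores wire the node in */
    new->next = next;
    new->prev = prev;
    prev->next = new;
}

#define list_add(new, head)      __list_add(new, head, (head)->next)
#define list_add_tail(new, head) __list_add(new, (head)->prev, head)

int main(void)
{
    struct list_head head = { &head, &head };   /* empty circular list */
    struct list_head a, b;

    list_add(&a, &head);        /* head -> a */
    list_add_tail(&b, &head);   /* head -> a -> b */
    printf("first is a: %d, last is b: %d\n",
           head.next == &a, head.prev == &b);
    return 0;
}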
68615diff --git a/include/linux/mm.h b/include/linux/mm.h
68616index 66e2f7c..ea88001 100644
68617--- a/include/linux/mm.h
68618+++ b/include/linux/mm.h
68619@@ -101,6 +101,11 @@ extern unsigned int kobjsize(const void *objp);
68620 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
68621 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
68622 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
68623+
68624+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68625+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
68626+#endif
68627+
68628 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
68629
68630 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
68631@@ -231,6 +236,7 @@ struct vm_operations_struct {
68632 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
68633 unsigned long size, pgoff_t pgoff);
68634 };
68635+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
68636
68637 struct mmu_gather;
68638 struct inode;
68639@@ -1068,34 +1074,6 @@ int set_page_dirty(struct page *page);
68640 int set_page_dirty_lock(struct page *page);
68641 int clear_page_dirty_for_io(struct page *page);
68642
68643-/* Is the vma a continuation of the stack vma above it? */
68644-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
68645-{
68646- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
68647-}
68648-
68649-static inline int stack_guard_page_start(struct vm_area_struct *vma,
68650- unsigned long addr)
68651-{
68652- return (vma->vm_flags & VM_GROWSDOWN) &&
68653- (vma->vm_start == addr) &&
68654- !vma_growsdown(vma->vm_prev, addr);
68655-}
68656-
68657-/* Is the vma a continuation of the stack vma below it? */
68658-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
68659-{
68660- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
68661-}
68662-
68663-static inline int stack_guard_page_end(struct vm_area_struct *vma,
68664- unsigned long addr)
68665-{
68666- return (vma->vm_flags & VM_GROWSUP) &&
68667- (vma->vm_end == addr) &&
68668- !vma_growsup(vma->vm_next, addr);
68669-}
68670-
68671 extern pid_t
68672 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
68673
68674@@ -1198,6 +1176,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
68675 }
68676 #endif
68677
68678+#ifdef CONFIG_MMU
68679+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
68680+#else
68681+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
68682+{
68683+ return __pgprot(0);
68684+}
68685+#endif
68686+
68687 int vma_wants_writenotify(struct vm_area_struct *vma);
68688
68689 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
68690@@ -1216,8 +1203,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
68691 {
68692 return 0;
68693 }
68694+
68695+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
68696+ unsigned long address)
68697+{
68698+ return 0;
68699+}
68700 #else
68701 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
68702+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
68703 #endif
68704
68705 #ifdef __PAGETABLE_PMD_FOLDED
68706@@ -1226,8 +1220,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
68707 {
68708 return 0;
68709 }
68710+
68711+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
68712+ unsigned long address)
68713+{
68714+ return 0;
68715+}
68716 #else
68717 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
68718+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
68719 #endif
68720
68721 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
68722@@ -1245,11 +1246,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
68723 NULL: pud_offset(pgd, address);
68724 }
68725
68726+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
68727+{
68728+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
68729+ NULL: pud_offset(pgd, address);
68730+}
68731+
68732 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
68733 {
68734 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
68735 NULL: pmd_offset(pud, address);
68736 }
68737+
68738+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
68739+{
68740+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
68741+ NULL: pmd_offset(pud, address);
68742+}
68743 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
68744
68745 #if USE_SPLIT_PTLOCKS
68746@@ -1479,6 +1492,7 @@ extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
68747 unsigned long, unsigned long,
68748 unsigned long, unsigned long);
68749 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
68750+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
68751
68752 /* These take the mm semaphore themselves */
68753 extern unsigned long vm_brk(unsigned long, unsigned long);
68754@@ -1573,6 +1587,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
68755 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
68756 struct vm_area_struct **pprev);
68757
68758+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
68759+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
68760+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
68761+
68762 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
68763 NULL if none. Assume start_addr < end_addr. */
68764 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
68765@@ -1601,15 +1619,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
68766 return vma;
68767 }
68768
68769-#ifdef CONFIG_MMU
68770-pgprot_t vm_get_page_prot(unsigned long vm_flags);
68771-#else
68772-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
68773-{
68774- return __pgprot(0);
68775-}
68776-#endif
68777-
68778 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
68779 unsigned long change_prot_numa(struct vm_area_struct *vma,
68780 unsigned long start, unsigned long end);
68781@@ -1721,7 +1730,7 @@ extern int unpoison_memory(unsigned long pfn);
68782 extern int sysctl_memory_failure_early_kill;
68783 extern int sysctl_memory_failure_recovery;
68784 extern void shake_page(struct page *p, int access);
68785-extern atomic_long_t mce_bad_pages;
68786+extern atomic_long_unchecked_t mce_bad_pages;
68787 extern int soft_offline_page(struct page *page, int flags);
68788
68789 extern void dump_page(struct page *page);
68790@@ -1752,5 +1761,11 @@ static inline unsigned int debug_guardpage_minorder(void) { return 0; }
68791 static inline bool page_is_guard(struct page *page) { return false; }
68792 #endif /* CONFIG_DEBUG_PAGEALLOC */
68793
68794+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68795+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
68796+#else
68797+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
68798+#endif
68799+
68800 #endif /* __KERNEL__ */
68801 #endif /* _LINUX_MM_H */
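
The *_alloc_kernel variants introduced above mirror pud_alloc()/pmd_alloc() for kernel page tables, which KERNEXEC keeps read-only; the out-of-line __pud_alloc_kernel()/__pmd_alloc_kernel() live in mm/memory.c elsewhere in this patch. A minimal sketch of the likely shape, assuming it follows mainline __pud_alloc() but populates the entry through a pgd_populate_kernel() helper that, on KERNEXEC kernels, is expected to bracket the write with pax_open_kernel()/pax_close_kernel():

/* sketch only, modeled on mainline __pud_alloc(); not the patch's exact body */
int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
{
	pud_t *new = pud_alloc_one(mm, address);

	if (!new)
		return -ENOMEM;

	smp_wmb(); /* See comment in __pte_alloc */

	spin_lock(&mm->page_table_lock);
	if (pgd_present(*pgd))		/* another thread raced and won */
		pud_free(mm, new);
	else
		pgd_populate_kernel(mm, pgd, new);	/* kernel variant of pgd_populate() */
	spin_unlock(&mm->page_table_lock);
	return 0;
}

track_exec_limit(), stubbed at the end of the mm.h hunk, is the CONFIG_ARCH_TRACK_EXEC_LIMIT hook that lets i386 PAGEEXEC approximate NX by tracking the highest executable address and adjusting the CS segment limit to match.
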
68802diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
68803index f8f5162..6276a36 100644
68804--- a/include/linux/mm_types.h
68805+++ b/include/linux/mm_types.h
68806@@ -288,6 +288,8 @@ struct vm_area_struct {
68807 #ifdef CONFIG_NUMA
68808 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
68809 #endif
68810+
68811+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
68812 };
68813
68814 struct core_thread {
68815@@ -362,7 +364,7 @@ struct mm_struct {
68816 unsigned long def_flags;
68817 unsigned long nr_ptes; /* Page table pages */
68818 unsigned long start_code, end_code, start_data, end_data;
68819- unsigned long start_brk, brk, start_stack;
68820+ unsigned long brk_gap, start_brk, brk, start_stack;
68821 unsigned long arg_start, arg_end, env_start, env_end;
68822
68823 unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
68824@@ -436,6 +438,24 @@ struct mm_struct {
68825 int first_nid;
68826 #endif
68827 struct uprobes_state uprobes_state;
68828+
68829+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68830+ unsigned long pax_flags;
68831+#endif
68832+
68833+#ifdef CONFIG_PAX_DLRESOLVE
68834+ unsigned long call_dl_resolve;
68835+#endif
68836+
68837+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
68838+ unsigned long call_syscall;
68839+#endif
68840+
68841+#ifdef CONFIG_PAX_ASLR
68842+ unsigned long delta_mmap; /* randomized offset */
68843+ unsigned long delta_stack; /* randomized offset */
68844+#endif
68845+
68846 };
68847
68848 /* first nid will either be a valid NID or one of these values */
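
vm_mirror supports PAX_SEGMEXEC, which splits the i386 address space in half and shadows every executable mapping with a mirror vma in the upper half; brk_gap records the randomized gap inserted between the data segment and the heap, and the pax_flags/delta_mmap/delta_stack fields are populated at execve() time. A hedged sketch of the mirror lookup declared in mm.h above (SEGMEXEC_TASK_SIZE is this patch's name for the split point; this is not the patch's exact body):

/* sketch: resolve a vma's SEGMEXEC mirror, NULL when mirroring is off */
struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *vma_m = vma->vm_mirror;

	if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !vma_m)
		return NULL;

	BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
	BUG_ON(vma_m->vm_start != vma->vm_start + SEGMEXEC_TASK_SIZE);
	return vma_m;
}
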
68849diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
68850index c5d5278..f0b68c8 100644
68851--- a/include/linux/mmiotrace.h
68852+++ b/include/linux/mmiotrace.h
68853@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
68854 /* Called from ioremap.c */
68855 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
68856 void __iomem *addr);
68857-extern void mmiotrace_iounmap(volatile void __iomem *addr);
68858+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
68859
68860 /* For anyone to insert markers. Remember trailing newline. */
68861 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
68862@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
68863 {
68864 }
68865
68866-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
68867+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
68868 {
68869 }
68870
68871diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
68872index 73b64a3..6562925 100644
68873--- a/include/linux/mmzone.h
68874+++ b/include/linux/mmzone.h
68875@@ -412,7 +412,7 @@ struct zone {
68876 unsigned long flags; /* zone flags, see below */
68877
68878 /* Zone statistics */
68879- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68880+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68881
68882 /*
68883 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
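
atomic_long_unchecked_t belongs to PaX's REFCOUNT feature: the regular atomic types gain overflow detection so that reference-count overflows trap instead of wrapping, and counters that are mere statistics, like vm_stat here, are switched to _unchecked variants that keep the old wrapping behaviour. Conceptually (a sketch; the real definitions are in the per-arch atomic headers of this patch, and the checked x86 increment adds a jno/undo/trap sequence):

/* sketch: the _unchecked family mirrors atomic_t minus the overflow check */
typedef struct {
	int counter;
} atomic_unchecked_t;

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return (*(volatile int *)&(v)->counter);
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	asm volatile(LOCK_PREFIX "incl %0" : "+m" (v->counter));	/* x86 flavour, no overflow trap */
}
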
68884diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
68885index fed3def..c933f99 100644
68886--- a/include/linux/mod_devicetable.h
68887+++ b/include/linux/mod_devicetable.h
68888@@ -12,7 +12,7 @@
68889 typedef unsigned long kernel_ulong_t;
68890 #endif
68891
68892-#define PCI_ANY_ID (~0)
68893+#define PCI_ANY_ID ((__u16)~0)
68894
68895 struct pci_device_id {
68896 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
68897@@ -139,7 +139,7 @@ struct usb_device_id {
68898 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
68899 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
68900
68901-#define HID_ANY_ID (~0)
68902+#define HID_ANY_ID (~0U)
68903 #define HID_BUS_ANY 0xffff
68904 #define HID_GROUP_ANY 0x0000
68905
68906@@ -498,7 +498,7 @@ struct dmi_system_id {
68907 const char *ident;
68908 struct dmi_strmatch matches[4];
68909 void *driver_data;
68910-};
68911+} __do_const;
68912 /*
68913 * struct dmi_device_id appears during expansion of
68914 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
68915diff --git a/include/linux/module.h b/include/linux/module.h
68916index 1375ee3..ced8177 100644
68917--- a/include/linux/module.h
68918+++ b/include/linux/module.h
68919@@ -17,9 +17,11 @@
68920 #include <linux/moduleparam.h>
68921 #include <linux/tracepoint.h>
68922 #include <linux/export.h>
68923+#include <linux/fs.h>
68924
68925 #include <linux/percpu.h>
68926 #include <asm/module.h>
68927+#include <asm/pgtable.h>
68928
68929 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
68930 #define MODULE_SIG_STRING "~Module signature appended~\n"
68931@@ -54,12 +56,13 @@ struct module_attribute {
68932 int (*test)(struct module *);
68933 void (*free)(struct module *);
68934 };
68935+typedef struct module_attribute __no_const module_attribute_no_const;
68936
68937 struct module_version_attribute {
68938 struct module_attribute mattr;
68939 const char *module_name;
68940 const char *version;
68941-} __attribute__ ((__aligned__(sizeof(void *))));
68942+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
68943
68944 extern ssize_t __modver_version_show(struct module_attribute *,
68945 struct module_kobject *, char *);
68946@@ -232,7 +235,7 @@ struct module
68947
68948 /* Sysfs stuff. */
68949 struct module_kobject mkobj;
68950- struct module_attribute *modinfo_attrs;
68951+ module_attribute_no_const *modinfo_attrs;
68952 const char *version;
68953 const char *srcversion;
68954 struct kobject *holders_dir;
68955@@ -281,19 +284,16 @@ struct module
68956 int (*init)(void);
68957
68958 /* If this is non-NULL, vfree after init() returns */
68959- void *module_init;
68960+ void *module_init_rx, *module_init_rw;
68961
68962 /* Here is the actual code + data, vfree'd on unload. */
68963- void *module_core;
68964+ void *module_core_rx, *module_core_rw;
68965
68966 /* Here are the sizes of the init and core sections */
68967- unsigned int init_size, core_size;
68968+ unsigned int init_size_rw, core_size_rw;
68969
68970 /* The size of the executable code in each section. */
68971- unsigned int init_text_size, core_text_size;
68972-
68973- /* Size of RO sections of the module (text+rodata) */
68974- unsigned int init_ro_size, core_ro_size;
68975+ unsigned int init_size_rx, core_size_rx;
68976
68977 /* Arch-specific module values */
68978 struct mod_arch_specific arch;
68979@@ -349,6 +349,10 @@ struct module
68980 #ifdef CONFIG_EVENT_TRACING
68981 struct ftrace_event_call **trace_events;
68982 unsigned int num_trace_events;
68983+ struct file_operations trace_id;
68984+ struct file_operations trace_enable;
68985+ struct file_operations trace_format;
68986+ struct file_operations trace_filter;
68987 #endif
68988 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
68989 unsigned int num_ftrace_callsites;
68990@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
68991 bool is_module_percpu_address(unsigned long addr);
68992 bool is_module_text_address(unsigned long addr);
68993
68994+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
68995+{
68996+
68997+#ifdef CONFIG_PAX_KERNEXEC
68998+ if (ktla_ktva(addr) >= (unsigned long)start &&
68999+ ktla_ktva(addr) < (unsigned long)start + size)
69000+ return 1;
69001+#endif
69002+
69003+ return ((void *)addr >= start && (void *)addr < start + size);
69004+}
69005+
69006+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
69007+{
69008+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
69009+}
69010+
69011+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
69012+{
69013+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
69014+}
69015+
69016+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
69017+{
69018+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
69019+}
69020+
69021+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
69022+{
69023+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
69024+}
69025+
69026 static inline int within_module_core(unsigned long addr, struct module *mod)
69027 {
69028- return (unsigned long)mod->module_core <= addr &&
69029- addr < (unsigned long)mod->module_core + mod->core_size;
69030+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
69031 }
69032
69033 static inline int within_module_init(unsigned long addr, struct module *mod)
69034 {
69035- return (unsigned long)mod->module_init <= addr &&
69036- addr < (unsigned long)mod->module_init + mod->init_size;
69037+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
69038 }
69039
69040 /* Search for module by name: must hold module_mutex. */
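
The rx/rw split replaces each module's single core and init allocation with separate executable and writable regions, so KERNEXEC can map module text RX and module data RW but never both; within_module_core()/within_module_init() keep their old signatures by checking both halves. The ktla_ktva() used in within_module_range() is assumed here to be this patch's per-arch helper for i386 KERNEXEC, where kernel text is rebased by a constant offset; roughly:

/* sketch, assumed from the patch's x86 headers: identity unless KERNEXEC
 * rebases kernel text, in which case linear and virtual text addresses
 * differ by __KERNEL_TEXT_OFFSET */
#ifdef CONFIG_PAX_KERNEXEC
#define ktla_ktva(addr)	((addr) + __KERNEL_TEXT_OFFSET)
#define ktva_ktla(addr)	((addr) - __KERNEL_TEXT_OFFSET)
#else
#define ktla_ktva(addr)	(addr)
#define ktva_ktla(addr)	(addr)
#endif
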
69041diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
69042index 560ca53..5ee8d73 100644
69043--- a/include/linux/moduleloader.h
69044+++ b/include/linux/moduleloader.h
69045@@ -23,11 +23,23 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
69046
69047 /* Allocator used for allocating struct module, core sections and init
69048 sections. Returns NULL on failure. */
69049-void *module_alloc(unsigned long size);
69050+void *module_alloc(unsigned long size) __size_overflow(1);
69051+
69052+#ifdef CONFIG_PAX_KERNEXEC
69053+void *module_alloc_exec(unsigned long size) __size_overflow(1);
69054+#else
69055+#define module_alloc_exec(x) module_alloc(x)
69056+#endif
69057
69058 /* Free memory returned from module_alloc. */
69059 void module_free(struct module *mod, void *module_region);
69060
69061+#ifdef CONFIG_PAX_KERNEXEC
69062+void module_free_exec(struct module *mod, void *module_region);
69063+#else
69064+#define module_free_exec(x, y) module_free((x), (y))
69065+#endif
69066+
69067 /*
69068 * Apply the given relocation to the (simplified) ELF. Return -error
69069 * or 0.
69070@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
69071 unsigned int relsec,
69072 struct module *me)
69073 {
69074+#ifdef CONFIG_MODULES
69075 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
69076+#endif
69077 return -ENOEXEC;
69078 }
69079 #endif
69080@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
69081 unsigned int relsec,
69082 struct module *me)
69083 {
69084+#ifdef CONFIG_MODULES
69085 	printk(KERN_ERR "module %s: RELA relocation unsupported\n", me->name);
69086+#endif
69087 return -ENOEXEC;
69088 }
69089 #endif
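
The #ifdef CONFIG_MODULES added around the two printk() stubs compiles the error message out of module-less kernels, leaving only the -ENOEXEC return. module_alloc_exec() pairs with the rx/rw split in struct module above; the loader side (kernel/module.c in this patch) then allocates along these lines (a sketch, not the patch's exact code):

/* sketch: core layout under KERNEXEC, separate RX and RW arenas */
mod->module_core_rx = module_alloc_exec(mod->core_size_rx);
if (!mod->module_core_rx)
	return -ENOMEM;

mod->module_core_rw = module_alloc(mod->core_size_rw);
if (!mod->module_core_rw) {
	module_free_exec(mod, mod->module_core_rx);
	return -ENOMEM;
}
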
69090diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
69091index 137b419..fe663ec 100644
69092--- a/include/linux/moduleparam.h
69093+++ b/include/linux/moduleparam.h
69094@@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
69095 * @len is usually just sizeof(string).
69096 */
69097 #define module_param_string(name, string, len, perm) \
69098- static const struct kparam_string __param_string_##name \
69099+ static const struct kparam_string __param_string_##name __used \
69100 = { len, string }; \
69101 __module_param_call(MODULE_PARAM_PREFIX, name, \
69102 &param_ops_string, \
69103@@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
69104 */
69105 #define module_param_array_named(name, array, type, nump, perm) \
69106 param_check_##type(name, &(array)[0]); \
69107- static const struct kparam_array __param_arr_##name \
69108+ static const struct kparam_array __param_arr_##name __used \
69109 = { .max = ARRAY_SIZE(array), .num = nump, \
69110 .ops = &param_ops_##type, \
69111 .elemsize = sizeof(array[0]), .elem = array }; \
69112diff --git a/include/linux/namei.h b/include/linux/namei.h
69113index 5a5ff57..5ae5070 100644
69114--- a/include/linux/namei.h
69115+++ b/include/linux/namei.h
69116@@ -19,7 +19,7 @@ struct nameidata {
69117 unsigned seq;
69118 int last_type;
69119 unsigned depth;
69120- char *saved_names[MAX_NESTED_LINKS + 1];
69121+ const char *saved_names[MAX_NESTED_LINKS + 1];
69122 };
69123
69124 /*
69125@@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
69126
69127 extern void nd_jump_link(struct nameidata *nd, struct path *path);
69128
69129-static inline void nd_set_link(struct nameidata *nd, char *path)
69130+static inline void nd_set_link(struct nameidata *nd, const char *path)
69131 {
69132 nd->saved_names[nd->depth] = path;
69133 }
69134
69135-static inline char *nd_get_link(struct nameidata *nd)
69136+static inline const char *nd_get_link(const struct nameidata *nd)
69137 {
69138 return nd->saved_names[nd->depth];
69139 }
69140diff --git a/include/linux/net.h b/include/linux/net.h
69141index aa16731..514b875 100644
69142--- a/include/linux/net.h
69143+++ b/include/linux/net.h
69144@@ -183,7 +183,7 @@ struct net_proto_family {
69145 int (*create)(struct net *net, struct socket *sock,
69146 int protocol, int kern);
69147 struct module *owner;
69148-};
69149+} __do_const;
69150
69151 struct iovec;
69152 struct kvec;
69153diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
69154index 9ef07d0..130a5d9 100644
69155--- a/include/linux/netdevice.h
69156+++ b/include/linux/netdevice.h
69157@@ -1012,6 +1012,7 @@ struct net_device_ops {
69158 u32 pid, u32 seq,
69159 struct net_device *dev);
69160 };
69161+typedef struct net_device_ops __no_const net_device_ops_no_const;
69162
69163 /*
69164 * The DEVICE structure.
69165@@ -1078,7 +1079,7 @@ struct net_device {
69166 int iflink;
69167
69168 struct net_device_stats stats;
69169- atomic_long_t rx_dropped; /* dropped packets by core network
69170+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
69171 * Do not use this in drivers.
69172 */
69173
69174diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
69175index ee14284..bc65d63 100644
69176--- a/include/linux/netfilter.h
69177+++ b/include/linux/netfilter.h
69178@@ -82,7 +82,7 @@ struct nf_sockopt_ops {
69179 #endif
69180 /* Use the module struct to lock set/get code in place */
69181 struct module *owner;
69182-};
69183+} __do_const;
69184
69185 /* Function to register/unregister hook points. */
69186 int nf_register_hook(struct nf_hook_ops *reg);
69187diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
69188index 7958e84..ed74d7a 100644
69189--- a/include/linux/netfilter/ipset/ip_set.h
69190+++ b/include/linux/netfilter/ipset/ip_set.h
69191@@ -98,7 +98,7 @@ struct ip_set_type_variant {
69192 /* Return true if "b" set is the same as "a"
69193 * according to the create set parameters */
69194 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
69195-};
69196+} __do_const;
69197
69198 /* The core set type structure */
69199 struct ip_set_type {
69200diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
69201index 4966dde..7d8ce06 100644
69202--- a/include/linux/netfilter/nfnetlink.h
69203+++ b/include/linux/netfilter/nfnetlink.h
69204@@ -16,7 +16,7 @@ struct nfnl_callback {
69205 const struct nlattr * const cda[]);
69206 const struct nla_policy *policy; /* netlink attribute policy */
69207 const u_int16_t attr_count; /* number of nlattr's */
69208-};
69209+} __do_const;
69210
69211 struct nfnetlink_subsystem {
69212 const char *name;
69213diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
69214new file mode 100644
69215index 0000000..33f4af8
69216--- /dev/null
69217+++ b/include/linux/netfilter/xt_gradm.h
69218@@ -0,0 +1,9 @@
69219+#ifndef _LINUX_NETFILTER_XT_GRADM_H
69220+#define _LINUX_NETFILTER_XT_GRADM_H 1
69221+
69222+struct xt_gradm_mtinfo {
69223+ __u16 flags;
69224+ __u16 invflags;
69225+};
69226+
69227+#endif
69228diff --git a/include/linux/nls.h b/include/linux/nls.h
69229index 5dc635f..35f5e11 100644
69230--- a/include/linux/nls.h
69231+++ b/include/linux/nls.h
69232@@ -31,7 +31,7 @@ struct nls_table {
69233 const unsigned char *charset2upper;
69234 struct module *owner;
69235 struct nls_table *next;
69236-};
69237+} __do_const;
69238
69239 /* this value hold the maximum octet of charset */
69240 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
69241diff --git a/include/linux/notifier.h b/include/linux/notifier.h
69242index d65746e..62e72c2 100644
69243--- a/include/linux/notifier.h
69244+++ b/include/linux/notifier.h
69245@@ -51,7 +51,8 @@ struct notifier_block {
69246 int (*notifier_call)(struct notifier_block *, unsigned long, void *);
69247 struct notifier_block __rcu *next;
69248 int priority;
69249-};
69250+} __do_const;
69251+typedef struct notifier_block __no_const notifier_block_no_const;
69252
69253 struct atomic_notifier_head {
69254 spinlock_t lock;
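
__do_const and __no_const drive grsecurity's constify gcc plugin: structures composed of function pointers are made const automatically so their instances land in read-only memory, __do_const forces this for structures such as notifier_block that the plugin would not catch on its own, and the matching __no_const typedef (notifier_block_no_const here) is the per-site escape hatch for instances that genuinely must be written at run time. The pattern in use (a sketch):

/* sketch: with the constify plugin, ops-style objects become read-only */
static int demo_notify(struct notifier_block *nb, unsigned long action, void *data)
{
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {	/* constified, ends up in .rodata */
	.notifier_call	= demo_notify,
	.priority	= 0,
};

static notifier_block_no_const runtime_nb;	/* opted out, stays writable */
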
69255diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
69256index a4c5624..79d6d88 100644
69257--- a/include/linux/oprofile.h
69258+++ b/include/linux/oprofile.h
69259@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
69260 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
69261 char const * name, ulong * val);
69262
69263-/** Create a file for read-only access to an atomic_t. */
69264+/** Create a file for read-only access to an atomic_unchecked_t. */
69265 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
69266- char const * name, atomic_t * val);
69267+ char const * name, atomic_unchecked_t * val);
69268
69269 /** create a directory */
69270 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
69271diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
69272index 45fc162..01a4068 100644
69273--- a/include/linux/pci_hotplug.h
69274+++ b/include/linux/pci_hotplug.h
69275@@ -80,7 +80,8 @@ struct hotplug_slot_ops {
69276 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
69277 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
69278 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
69279-};
69280+} __do_const;
69281+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
69282
69283 /**
69284 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
69285diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
69286index 6bfb2faa..e5bc5e5 100644
69287--- a/include/linux/perf_event.h
69288+++ b/include/linux/perf_event.h
69289@@ -328,8 +328,8 @@ struct perf_event {
69290
69291 enum perf_event_active_state state;
69292 unsigned int attach_state;
69293- local64_t count;
69294- atomic64_t child_count;
69295+ local64_t count; /* PaX: fix it one day */
69296+ atomic64_unchecked_t child_count;
69297
69298 /*
69299 * These are the total time in nanoseconds that the event
69300@@ -380,8 +380,8 @@ struct perf_event {
69301 * These accumulate total time (in nanoseconds) that children
69302 * events have been enabled and running, respectively.
69303 */
69304- atomic64_t child_total_time_enabled;
69305- atomic64_t child_total_time_running;
69306+ atomic64_unchecked_t child_total_time_enabled;
69307+ atomic64_unchecked_t child_total_time_running;
69308
69309 /*
69310 * Protect attach/detach and child_list:
69311@@ -801,7 +801,7 @@ static inline void perf_event_task_tick(void) { }
69312 */
69313 #define perf_cpu_notifier(fn) \
69314 do { \
69315- static struct notifier_block fn##_nb __cpuinitdata = \
69316+ static struct notifier_block fn##_nb = \
69317 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
69318 unsigned long cpu = smp_processor_id(); \
69319 unsigned long flags; \
69320diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
69321index ad1a427..6419649 100644
69322--- a/include/linux/pipe_fs_i.h
69323+++ b/include/linux/pipe_fs_i.h
69324@@ -45,9 +45,9 @@ struct pipe_buffer {
69325 struct pipe_inode_info {
69326 wait_queue_head_t wait;
69327 unsigned int nrbufs, curbuf, buffers;
69328- unsigned int readers;
69329- unsigned int writers;
69330- unsigned int waiting_writers;
69331+ atomic_t readers;
69332+ atomic_t writers;
69333+ atomic_t waiting_writers;
69334 unsigned int r_counter;
69335 unsigned int w_counter;
69336 struct page *tmp_page;
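
Turning the pipe reader/writer counts into atomic_t hardens the open/release paths against concurrent updates to the counts; the fs/pipe.c side of this patch converts each access accordingly, along these lines (sketch):

/* sketch: fs/pipe.c after the conversion */
atomic_inc(&pipe->readers);		/* was: pipe->readers++;         */
atomic_dec(&pipe->waiting_writers);	/* was: pipe->waiting_writers--; */
if (!atomic_read(&pipe->writers))	/* was: if (!pipe->writers)      */
	wake_up_interruptible(&pipe->wait);
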
69337diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
69338index 5f28cae..3d23723 100644
69339--- a/include/linux/platform_data/usb-ehci-s5p.h
69340+++ b/include/linux/platform_data/usb-ehci-s5p.h
69341@@ -14,7 +14,7 @@
69342 struct s5p_ehci_platdata {
69343 int (*phy_init)(struct platform_device *pdev, int type);
69344 int (*phy_exit)(struct platform_device *pdev, int type);
69345-};
69346+} __no_const;
69347
69348 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
69349
69350diff --git a/include/linux/platform_data/usb-exynos.h b/include/linux/platform_data/usb-exynos.h
69351index c256c59..8ea94c7 100644
69352--- a/include/linux/platform_data/usb-exynos.h
69353+++ b/include/linux/platform_data/usb-exynos.h
69354@@ -14,7 +14,7 @@
69355 struct exynos4_ohci_platdata {
69356 int (*phy_init)(struct platform_device *pdev, int type);
69357 int (*phy_exit)(struct platform_device *pdev, int type);
69358-};
69359+} __no_const;
69360
69361 extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
69362
69363diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
69364index 7c1d252..c5c773e 100644
69365--- a/include/linux/pm_domain.h
69366+++ b/include/linux/pm_domain.h
69367@@ -48,7 +48,7 @@ struct gpd_dev_ops {
69368
69369 struct gpd_cpu_data {
69370 unsigned int saved_exit_latency;
69371- struct cpuidle_state *idle_state;
69372+ cpuidle_state_no_const *idle_state;
69373 };
69374
69375 struct generic_pm_domain {
69376diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
69377index f271860..6b3bec5 100644
69378--- a/include/linux/pm_runtime.h
69379+++ b/include/linux/pm_runtime.h
69380@@ -97,7 +97,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
69381
69382 static inline void pm_runtime_mark_last_busy(struct device *dev)
69383 {
69384- ACCESS_ONCE(dev->power.last_busy) = jiffies;
69385+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
69386 }
69387
69388 #else /* !CONFIG_PM_RUNTIME */
69389diff --git a/include/linux/pnp.h b/include/linux/pnp.h
69390index 195aafc..49a7bc2 100644
69391--- a/include/linux/pnp.h
69392+++ b/include/linux/pnp.h
69393@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
69394 struct pnp_fixup {
69395 char id[7];
69396 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
69397-};
69398+} __do_const;
69399
69400 /* config parameters */
69401 #define PNP_CONFIG_NORMAL 0x0001
69402diff --git a/include/linux/poison.h b/include/linux/poison.h
69403index 2110a81..13a11bb 100644
69404--- a/include/linux/poison.h
69405+++ b/include/linux/poison.h
69406@@ -19,8 +19,8 @@
69407 * under normal circumstances, used to verify that nobody uses
69408 * non-initialized list entries.
69409 */
69410-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
69411-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
69412+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
69413+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
69414
69415 /********** include/linux/timer.h **********/
69416 /*
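
The stock poison constants sit at low addresses that userland can map, so a stray dereference of a poisoned list_head could be steered at attacker-controlled data; the replacements sit at the top of the 32-bit range, outside what a process can map. The failure mode being closed, as a userland sketch:

/* sketch (userland): why a low poison value was dangerous; on a kernel
 * without minimum-address restrictions this mapping covers LIST_POISON1 */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap((void *)0x00100000, 0x2000, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	printf("mapping over 0x00100100: %s\n",
	       p == MAP_FAILED ? "refused" : "granted");
	return 0;
}

With the poison moved to 0xFFFFFF01/0xFFFFFF02 the same trick has nothing left to aim at.
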
69417diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
69418index c0f44c2..1572583 100644
69419--- a/include/linux/power/smartreflex.h
69420+++ b/include/linux/power/smartreflex.h
69421@@ -238,7 +238,7 @@ struct omap_sr_class_data {
69422 int (*notify)(struct omap_sr *sr, u32 status);
69423 u8 notify_flags;
69424 u8 class_type;
69425-};
69426+} __do_const;
69427
69428 /**
69429 * struct omap_sr_nvalue_table - Smartreflex n-target value info
69430diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
69431index 4ea1d37..80f4b33 100644
69432--- a/include/linux/ppp-comp.h
69433+++ b/include/linux/ppp-comp.h
69434@@ -84,7 +84,7 @@ struct compressor {
69435 struct module *owner;
69436 /* Extra skb space needed by the compressor algorithm */
69437 unsigned int comp_extra;
69438-};
69439+} __do_const;
69440
69441 /*
69442 * The return value from decompress routine is the length of the
69443diff --git a/include/linux/printk.h b/include/linux/printk.h
69444index 9afc01e..92c32e8 100644
69445--- a/include/linux/printk.h
69446+++ b/include/linux/printk.h
69447@@ -101,6 +101,8 @@ void early_printk(const char *fmt, ...);
69448 extern int printk_needs_cpu(int cpu);
69449 extern void printk_tick(void);
69450
69451+extern int kptr_restrict;
69452+
69453 #ifdef CONFIG_PRINTK
69454 asmlinkage __printf(5, 0)
69455 int vprintk_emit(int facility, int level,
69456@@ -135,7 +137,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
69457
69458 extern int printk_delay_msec;
69459 extern int dmesg_restrict;
69460-extern int kptr_restrict;
69461
69462 void log_buf_kexec_setup(void);
69463 void __init setup_log_buf(int early);
69464diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
69465index 32676b3..8f7a182 100644
69466--- a/include/linux/proc_fs.h
69467+++ b/include/linux/proc_fs.h
69468@@ -159,6 +159,18 @@ static inline struct proc_dir_entry *proc_create(const char *name, umode_t mode,
69469 return proc_create_data(name, mode, parent, proc_fops, NULL);
69470 }
69471
69472+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
69473+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
69474+{
69475+#ifdef CONFIG_GRKERNSEC_PROC_USER
69476+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
69477+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69478+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
69479+#else
69480+ return proc_create_data(name, mode, parent, proc_fops, NULL);
69481+#endif
69482+}
69483+
69484 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
69485 umode_t mode, struct proc_dir_entry *base,
69486 read_proc_t *read_proc, void * data)
69487diff --git a/include/linux/random.h b/include/linux/random.h
69488index d984608..d6f0042 100644
69489--- a/include/linux/random.h
69490+++ b/include/linux/random.h
69491@@ -39,6 +39,11 @@ void prandom_seed(u32 seed);
69492 u32 prandom_u32_state(struct rnd_state *);
69493 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
69494
69495+static inline unsigned long pax_get_random_long(void)
69496+{
69497+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
69498+}
69499+
69500 /*
69501 * Handle minimum values for seeds
69502 */
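
pax_get_random_long() stitches a full unsigned long out of prandom_u32() (two draws on 64-bit, one on 32-bit; the conditional shift folds away where long is 4 bytes) and feeds the ASLR state added to mm_struct earlier in this patch. The consuming side looks roughly like this (sketch; PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN stand in for the per-arch bit widths):

/* sketch: execve-time randomization under PAX_RANDMMAP */
if (mm->pax_flags & MF_PAX_RANDMMAP) {
	mm->delta_mmap  = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)  - 1)) << PAGE_SHIFT;
	mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;
}
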
69503diff --git a/include/linux/rculist.h b/include/linux/rculist.h
69504index c92dd28..08f4eab 100644
69505--- a/include/linux/rculist.h
69506+++ b/include/linux/rculist.h
69507@@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
69508 struct list_head *prev, struct list_head *next);
69509 #endif
69510
69511+extern void __pax_list_add_rcu(struct list_head *new,
69512+ struct list_head *prev, struct list_head *next);
69513+
69514 /**
69515 * list_add_rcu - add a new entry to rcu-protected list
69516 * @new: new entry to be added
69517@@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
69518 __list_add_rcu(new, head, head->next);
69519 }
69520
69521+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
69522+{
69523+ __pax_list_add_rcu(new, head, head->next);
69524+}
69525+
69526 /**
69527 * list_add_tail_rcu - add a new entry to rcu-protected list
69528 * @new: new entry to be added
69529@@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
69530 __list_add_rcu(new, head->prev, head);
69531 }
69532
69533+static inline void pax_list_add_tail_rcu(struct list_head *new,
69534+ struct list_head *head)
69535+{
69536+ __pax_list_add_rcu(new, head->prev, head);
69537+}
69538+
69539 /**
69540 * list_del_rcu - deletes entry from list without re-initialization
69541 * @entry: the element to delete from the list.
69542@@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
69543 entry->prev = LIST_POISON2;
69544 }
69545
69546+extern void pax_list_del_rcu(struct list_head *entry);
69547+
69548 /**
69549 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
69550 * @n: the element to delete from the hash list.
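
The pax_list_* variants exist because constification (the __do_const structures seen throughout this section) puts some list nodes in read-only memory; an out-of-line __pax_list_add_rcu() can bracket the pointer writes with the kernel's write-protection toggle, which the ordinary inline helpers cannot. A plausible shape, under that assumption rather than the patch's exact body:

/* sketch: like __list_add_rcu(), but permitted to touch read-only nodes */
void __pax_list_add_rcu(struct list_head *new,
			struct list_head *prev, struct list_head *next)
{
	pax_open_kernel();		/* temporarily lift kernel write protection */
	new->next = next;
	new->prev = prev;
	rcu_assign_pointer(list_next_rcu(prev), new);
	next->prev = new;
	pax_close_kernel();
}
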
69551diff --git a/include/linux/reboot.h b/include/linux/reboot.h
69552index 23b3630..e1bc12b 100644
69553--- a/include/linux/reboot.h
69554+++ b/include/linux/reboot.h
69555@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
69556 * Architecture-specific implementations of sys_reboot commands.
69557 */
69558
69559-extern void machine_restart(char *cmd);
69560-extern void machine_halt(void);
69561-extern void machine_power_off(void);
69562+extern void machine_restart(char *cmd) __noreturn;
69563+extern void machine_halt(void) __noreturn;
69564+extern void machine_power_off(void) __noreturn;
69565
69566 extern void machine_shutdown(void);
69567 struct pt_regs;
69568@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
69569 */
69570
69571 extern void kernel_restart_prepare(char *cmd);
69572-extern void kernel_restart(char *cmd);
69573-extern void kernel_halt(void);
69574-extern void kernel_power_off(void);
69575+extern void kernel_restart(char *cmd) __noreturn;
69576+extern void kernel_halt(void) __noreturn;
69577+extern void kernel_power_off(void) __noreturn;
69578
69579 extern int C_A_D; /* for sysctl */
69580 void ctrl_alt_del(void);
69581@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
69582 * Emergency restart, callable from an interrupt handler.
69583 */
69584
69585-extern void emergency_restart(void);
69586+extern void emergency_restart(void) __noreturn;
69587 #include <asm/emergency-restart.h>
69588
69589 #endif /* _LINUX_REBOOT_H */
69590diff --git a/include/linux/regset.h b/include/linux/regset.h
69591index 8e0c9fe..ac4d221 100644
69592--- a/include/linux/regset.h
69593+++ b/include/linux/regset.h
69594@@ -161,7 +161,8 @@ struct user_regset {
69595 unsigned int align;
69596 unsigned int bias;
69597 unsigned int core_note_type;
69598-};
69599+} __do_const;
69600+typedef struct user_regset __no_const user_regset_no_const;
69601
69602 /**
69603 * struct user_regset_view - available regsets
69604diff --git a/include/linux/relay.h b/include/linux/relay.h
69605index 91cacc3..b55ff74 100644
69606--- a/include/linux/relay.h
69607+++ b/include/linux/relay.h
69608@@ -160,7 +160,7 @@ struct rchan_callbacks
69609 * The callback should return 0 if successful, negative if not.
69610 */
69611 int (*remove_buf_file)(struct dentry *dentry);
69612-};
69613+} __no_const;
69614
69615 /*
69616 * CONFIG_RELAY kernel API, kernel/relay.c
69617diff --git a/include/linux/rio.h b/include/linux/rio.h
69618index a3e7842..d973ca6 100644
69619--- a/include/linux/rio.h
69620+++ b/include/linux/rio.h
69621@@ -339,7 +339,7 @@ struct rio_ops {
69622 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
69623 u64 rstart, u32 size, u32 flags);
69624 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
69625-};
69626+} __no_const;
69627
69628 #define RIO_RESOURCE_MEM 0x00000100
69629 #define RIO_RESOURCE_DOORBELL 0x00000200
69630diff --git a/include/linux/rmap.h b/include/linux/rmap.h
69631index c20635c..2f5def4 100644
69632--- a/include/linux/rmap.h
69633+++ b/include/linux/rmap.h
69634@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
69635 void anon_vma_init(void); /* create anon_vma_cachep */
69636 int anon_vma_prepare(struct vm_area_struct *);
69637 void unlink_anon_vmas(struct vm_area_struct *);
69638-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
69639-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
69640+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
69641+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
69642
69643 static inline void anon_vma_merge(struct vm_area_struct *vma,
69644 struct vm_area_struct *next)
69645diff --git a/include/linux/sched.h b/include/linux/sched.h
69646index d211247..a5cbf38b 100644
69647--- a/include/linux/sched.h
69648+++ b/include/linux/sched.h
69649@@ -61,6 +61,7 @@ struct bio_list;
69650 struct fs_struct;
69651 struct perf_event_context;
69652 struct blk_plug;
69653+struct linux_binprm;
69654
69655 /*
69656 * List of flags we want to share for kernel threads,
69657@@ -354,10 +355,23 @@ struct user_namespace;
69658 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
69659
69660 extern int sysctl_max_map_count;
69661+extern unsigned long sysctl_heap_stack_gap;
69662
69663 #include <linux/aio.h>
69664
69665 #ifdef CONFIG_MMU
69666+
69667+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
69668+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
69669+#else
69670+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
69671+{
69672+ return 0;
69673+}
69674+#endif
69675+
69676+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
69677+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
69678 extern void arch_pick_mmap_layout(struct mm_struct *mm);
69679 extern unsigned long
69680 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
69681@@ -639,6 +653,17 @@ struct signal_struct {
69682 #ifdef CONFIG_TASKSTATS
69683 struct taskstats *stats;
69684 #endif
69685+
69686+#ifdef CONFIG_GRKERNSEC
69687+ u32 curr_ip;
69688+ u32 saved_ip;
69689+ u32 gr_saddr;
69690+ u32 gr_daddr;
69691+ u16 gr_sport;
69692+ u16 gr_dport;
69693+ u8 used_accept:1;
69694+#endif
69695+
69696 #ifdef CONFIG_AUDIT
69697 unsigned audit_tty;
69698 struct tty_audit_buf *tty_audit_buf;
69699@@ -717,6 +742,11 @@ struct user_struct {
69700 struct key *session_keyring; /* UID's default session keyring */
69701 #endif
69702
69703+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
69704+ unsigned int banned;
69705+ unsigned long ban_expires;
69706+#endif
69707+
69708 /* Hash table maintenance information */
69709 struct hlist_node uidhash_node;
69710 kuid_t uid;
69711@@ -1116,7 +1146,7 @@ struct sched_class {
69712 #ifdef CONFIG_FAIR_GROUP_SCHED
69713 void (*task_move_group) (struct task_struct *p, int on_rq);
69714 #endif
69715-};
69716+} __do_const;
69717
69718 struct load_weight {
69719 unsigned long weight, inv_weight;
69720@@ -1360,8 +1390,8 @@ struct task_struct {
69721 struct list_head thread_group;
69722
69723 struct completion *vfork_done; /* for vfork() */
69724- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
69725- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
69726+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
69727+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
69728
69729 cputime_t utime, stime, utimescaled, stimescaled;
69730 cputime_t gtime;
69731@@ -1377,11 +1407,6 @@ struct task_struct {
69732 struct task_cputime cputime_expires;
69733 struct list_head cpu_timers[3];
69734
69735-/* process credentials */
69736- const struct cred __rcu *real_cred; /* objective and real subjective task
69737- * credentials (COW) */
69738- const struct cred __rcu *cred; /* effective (overridable) subjective task
69739- * credentials (COW) */
69740 char comm[TASK_COMM_LEN]; /* executable name excluding path
69741 - access with [gs]et_task_comm (which lock
69742 it with task_lock())
69743@@ -1398,6 +1423,10 @@ struct task_struct {
69744 #endif
69745 /* CPU-specific state of this task */
69746 struct thread_struct thread;
69747+/* thread_info moved to task_struct */
69748+#ifdef CONFIG_X86
69749+ struct thread_info tinfo;
69750+#endif
69751 /* filesystem information */
69752 struct fs_struct *fs;
69753 /* open file information */
69754@@ -1471,6 +1500,10 @@ struct task_struct {
69755 gfp_t lockdep_reclaim_gfp;
69756 #endif
69757
69758+/* process credentials */
69759+ const struct cred __rcu *real_cred; /* objective and real subjective task
69760+ * credentials (COW) */
69761+
69762 /* journalling filesystem info */
69763 void *journal_info;
69764
69765@@ -1509,6 +1542,10 @@ struct task_struct {
69766 /* cg_list protected by css_set_lock and tsk->alloc_lock */
69767 struct list_head cg_list;
69768 #endif
69769+
69770+ const struct cred __rcu *cred; /* effective (overridable) subjective task
69771+ * credentials (COW) */
69772+
69773 #ifdef CONFIG_FUTEX
69774 struct robust_list_head __user *robust_list;
69775 #ifdef CONFIG_COMPAT
69776@@ -1605,8 +1642,74 @@ struct task_struct {
69777 #ifdef CONFIG_UPROBES
69778 struct uprobe_task *utask;
69779 #endif
69780+
69781+#ifdef CONFIG_GRKERNSEC
69782+ /* grsecurity */
69783+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69784+ u64 exec_id;
69785+#endif
69786+#ifdef CONFIG_GRKERNSEC_SETXID
69787+ const struct cred *delayed_cred;
69788+#endif
69789+ struct dentry *gr_chroot_dentry;
69790+ struct acl_subject_label *acl;
69791+ struct acl_role_label *role;
69792+ struct file *exec_file;
69793+ unsigned long brute_expires;
69794+ u16 acl_role_id;
69795+ /* is this the task that authenticated to the special role */
69796+ u8 acl_sp_role;
69797+ u8 is_writable;
69798+ u8 brute;
69799+ u8 gr_is_chrooted;
69800+#endif
69801+
69802 };
69803
69804+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
69805+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
69806+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
69807+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
69808+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
69809+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
69810+
69811+#ifdef CONFIG_PAX_SOFTMODE
69812+extern int pax_softmode;
69813+#endif
69814+
69815+extern int pax_check_flags(unsigned long *);
69816+
69817+/* if tsk != current then task_lock must be held on it */
69818+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
69819+static inline unsigned long pax_get_flags(struct task_struct *tsk)
69820+{
69821+ if (likely(tsk->mm))
69822+ return tsk->mm->pax_flags;
69823+ else
69824+ return 0UL;
69825+}
69826+
69827+/* if tsk != current then task_lock must be held on it */
69828+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
69829+{
69830+ if (likely(tsk->mm)) {
69831+ tsk->mm->pax_flags = flags;
69832+ return 0;
69833+ }
69834+ return -EINVAL;
69835+}
69836+#endif
69837+
69838+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
69839+extern void pax_set_initial_flags(struct linux_binprm *bprm);
69840+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
69841+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
69842+#endif
69843+
69844+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
69845+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
69846+extern void pax_report_refcount_overflow(struct pt_regs *regs);
69847+
69848 /* Future-safe accessor for struct task_struct's cpus_allowed. */
69849 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
69850
69851@@ -1696,7 +1799,7 @@ struct pid_namespace;
69852 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
69853 struct pid_namespace *ns);
69854
69855-static inline pid_t task_pid_nr(struct task_struct *tsk)
69856+static inline pid_t task_pid_nr(const struct task_struct *tsk)
69857 {
69858 return tsk->pid;
69859 }
69860@@ -2155,7 +2258,9 @@ void yield(void);
69861 extern struct exec_domain default_exec_domain;
69862
69863 union thread_union {
69864+#ifndef CONFIG_X86
69865 struct thread_info thread_info;
69866+#endif
69867 unsigned long stack[THREAD_SIZE/sizeof(long)];
69868 };
69869
69870@@ -2188,6 +2293,7 @@ extern struct pid_namespace init_pid_ns;
69871 */
69872
69873 extern struct task_struct *find_task_by_vpid(pid_t nr);
69874+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
69875 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
69876 struct pid_namespace *ns);
69877
69878@@ -2344,7 +2450,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
69879 extern void exit_itimers(struct signal_struct *);
69880 extern void flush_itimer_signals(void);
69881
69882-extern void do_group_exit(int);
69883+extern __noreturn void do_group_exit(int);
69884
69885 extern int allow_signal(int);
69886 extern int disallow_signal(int);
69887@@ -2545,9 +2651,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
69888
69889 #endif
69890
69891-static inline int object_is_on_stack(void *obj)
69892+static inline int object_starts_on_stack(void *obj)
69893 {
69894- void *stack = task_stack_page(current);
69895+ const void *stack = task_stack_page(current);
69896
69897 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
69898 }
69899diff --git a/include/linux/security.h b/include/linux/security.h
69900index eee7478..290f7ba 100644
69901--- a/include/linux/security.h
69902+++ b/include/linux/security.h
69903@@ -26,6 +26,7 @@
69904 #include <linux/capability.h>
69905 #include <linux/slab.h>
69906 #include <linux/err.h>
69907+#include <linux/grsecurity.h>
69908
69909 struct linux_binprm;
69910 struct cred;
69911diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
69912index 68a04a3..866e6a1 100644
69913--- a/include/linux/seq_file.h
69914+++ b/include/linux/seq_file.h
69915@@ -26,6 +26,9 @@ struct seq_file {
69916 struct mutex lock;
69917 const struct seq_operations *op;
69918 int poll_event;
69919+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69920+ u64 exec_id;
69921+#endif
69922 #ifdef CONFIG_USER_NS
69923 struct user_namespace *user_ns;
69924 #endif
69925@@ -38,6 +41,7 @@ struct seq_operations {
69926 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
69927 int (*show) (struct seq_file *m, void *v);
69928 };
69929+typedef struct seq_operations __no_const seq_operations_no_const;
69930
69931 #define SEQ_SKIP 1
69932
69933diff --git a/include/linux/shm.h b/include/linux/shm.h
69934index 429c199..4d42e38 100644
69935--- a/include/linux/shm.h
69936+++ b/include/linux/shm.h
69937@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
69938
69939 /* The task created the shm object. NULL if the task is dead. */
69940 struct task_struct *shm_creator;
69941+#ifdef CONFIG_GRKERNSEC
69942+ time_t shm_createtime;
69943+ pid_t shm_lapid;
69944+#endif
69945 };
69946
69947 /* shm_mode upper byte flags */
69948diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
69949index 320e976..fd52553 100644
69950--- a/include/linux/skbuff.h
69951+++ b/include/linux/skbuff.h
69952@@ -590,7 +590,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
69953 extern struct sk_buff *__alloc_skb(unsigned int size,
69954 gfp_t priority, int flags, int node);
69955 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
69956-static inline struct sk_buff *alloc_skb(unsigned int size,
69957+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
69958 gfp_t priority)
69959 {
69960 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
69961@@ -700,7 +700,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
69962 */
69963 static inline int skb_queue_empty(const struct sk_buff_head *list)
69964 {
69965- return list->next == (struct sk_buff *)list;
69966+ return list->next == (const struct sk_buff *)list;
69967 }
69968
69969 /**
69970@@ -713,7 +713,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
69971 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69972 const struct sk_buff *skb)
69973 {
69974- return skb->next == (struct sk_buff *)list;
69975+ return skb->next == (const struct sk_buff *)list;
69976 }
69977
69978 /**
69979@@ -726,7 +726,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69980 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
69981 const struct sk_buff *skb)
69982 {
69983- return skb->prev == (struct sk_buff *)list;
69984+ return skb->prev == (const struct sk_buff *)list;
69985 }
69986
69987 /**
69988@@ -1722,7 +1722,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
69989 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
69990 */
69991 #ifndef NET_SKB_PAD
69992-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
69993+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
69994 #endif
69995
69996 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
69997@@ -2300,7 +2300,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
69998 int noblock, int *err);
69999 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
70000 struct poll_table_struct *wait);
70001-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
70002+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
70003 int offset, struct iovec *to,
70004 int size);
70005 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
70006diff --git a/include/linux/slab.h b/include/linux/slab.h
70007index 5d168d7..720bff3 100644
70008--- a/include/linux/slab.h
70009+++ b/include/linux/slab.h
70010@@ -12,13 +12,20 @@
70011 #include <linux/gfp.h>
70012 #include <linux/types.h>
70013 #include <linux/workqueue.h>
70014-
70015+#include <linux/err.h>
70016
70017 /*
70018 * Flags to pass to kmem_cache_create().
70019 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
70020 */
70021 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
70022+
70023+#ifdef CONFIG_PAX_USERCOPY_SLABS
70024+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
70025+#else
70026+#define SLAB_USERCOPY 0x00000000UL
70027+#endif
70028+
70029 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
70030 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
70031 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
70032@@ -89,10 +96,13 @@
70033 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
70034 * Both make kfree a no-op.
70035 */
70036-#define ZERO_SIZE_PTR ((void *)16)
70037+#define ZERO_SIZE_PTR \
70038+({ \
70039+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
70040+ (void *)(-MAX_ERRNO-1L); \
70041+})
70042
70043-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
70044- (unsigned long)ZERO_SIZE_PTR)
70045+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
70046
70047 /*
70048 * Common fields provided in kmem_cache by all slab allocators
70049@@ -112,7 +122,7 @@ struct kmem_cache {
70050 unsigned int align; /* Alignment as calculated */
70051 unsigned long flags; /* Active flags on the slab */
70052 const char *name; /* Slab name for sysfs */
70053- int refcount; /* Use counter */
70054+ atomic_t refcount; /* Use counter */
70055 void (*ctor)(void *); /* Called on object slot creation */
70056 struct list_head list; /* List of all slab caches on the system */
70057 };
70058@@ -232,6 +242,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
70059 void kfree(const void *);
70060 void kzfree(const void *);
70061 size_t ksize(const void *);
70062+const char *check_heap_object(const void *ptr, unsigned long n);
70063+bool is_usercopy_object(const void *ptr);
70064
70065 /*
70066 * Allocator specific definitions. These are mainly used to establish optimized
70067@@ -311,6 +323,7 @@ size_t ksize(const void *);
70068 * for general use, and so are not documented here. For a full list of
70069 * potential flags, always refer to linux/gfp.h.
70070 */
70071+
70072 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
70073 {
70074 if (size != 0 && n > SIZE_MAX / size)
70075@@ -370,7 +383,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
70076 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
70077 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
70078 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
70079-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
70080+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
70081 #define kmalloc_track_caller(size, flags) \
70082 __kmalloc_track_caller(size, flags, _RET_IP_)
70083 #else
70084@@ -390,7 +403,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
70085 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
70086 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
70087 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
70088-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
70089+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
70090 #define kmalloc_node_track_caller(size, flags, node) \
70091 __kmalloc_node_track_caller(size, flags, node, \
70092 _RET_IP_)
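
The new ZERO_SIZE_PTR is (void *)(-MAX_ERRNO - 1), on 64-bit 0xfffffffffffff000, immediately below the ERR_PTR() range; the rewritten ZERO_OR_NULL_PTR() then needs a single unsigned comparison to catch NULL (0 - 1 wraps to ULONG_MAX), ZERO_SIZE_PTR itself, and every errno-encoded pointer, while any real object pointer falls below the threshold. A standalone check of the arithmetic:

#include <stdio.h>

#define MAX_ERRNO	4095UL
#define ZSP		((unsigned long)(-MAX_ERRNO - 1))	/* 0xfffffffffffff000 */
#define ZERO_OR_NULL(x)	((unsigned long)(x) - 1 >= ZSP - 1)

int main(void)
{
	unsigned long samples[] = {
		0x0UL,			/* NULL: wraps to ULONG_MAX, caught   */
		ZSP,			/* ZERO_SIZE_PTR itself, caught       */
		(unsigned long)-22L,	/* ERR_PTR(-EINVAL), caught           */
		0x7f0000001000UL,	/* ordinary pointer, not caught       */
	};

	for (int i = 0; i < 4; i++)
		printf("%#018lx -> %d\n", samples[i], (int)ZERO_OR_NULL(samples[i]));
	return 0;
}

A useful side effect: the old (void *)16 pointed into the mappable NULL page, whereas the new value faults on any access, so buggy consumers of a zero-size allocation crash instead of silently reading low memory.
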
70093diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
70094index 8bb6e0e..8eb0dbe 100644
70095--- a/include/linux/slab_def.h
70096+++ b/include/linux/slab_def.h
70097@@ -52,7 +52,7 @@ struct kmem_cache {
70098 /* 4) cache creation/removal */
70099 const char *name;
70100 struct list_head list;
70101- int refcount;
70102+ atomic_t refcount;
70103 int object_size;
70104 int align;
70105
70106@@ -68,10 +68,10 @@ struct kmem_cache {
70107 unsigned long node_allocs;
70108 unsigned long node_frees;
70109 unsigned long node_overflow;
70110- atomic_t allochit;
70111- atomic_t allocmiss;
70112- atomic_t freehit;
70113- atomic_t freemiss;
70114+ atomic_unchecked_t allochit;
70115+ atomic_unchecked_t allocmiss;
70116+ atomic_unchecked_t freehit;
70117+ atomic_unchecked_t freemiss;
70118
70119 /*
70120 * If debugging is enabled, then the allocator can add additional
70121@@ -111,11 +111,16 @@ struct cache_sizes {
70122 #ifdef CONFIG_ZONE_DMA
70123 struct kmem_cache *cs_dmacachep;
70124 #endif
70125+
70126+#ifdef CONFIG_PAX_USERCOPY_SLABS
70127+ struct kmem_cache *cs_usercopycachep;
70128+#endif
70129+
70130 };
70131 extern struct cache_sizes malloc_sizes[];
70132
70133 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
70134-void *__kmalloc(size_t size, gfp_t flags);
70135+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
70136
70137 #ifdef CONFIG_TRACING
70138 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
70139@@ -152,6 +157,13 @@ found:
70140 cachep = malloc_sizes[i].cs_dmacachep;
70141 else
70142 #endif
70143+
70144+#ifdef CONFIG_PAX_USERCOPY_SLABS
70145+ if (flags & GFP_USERCOPY)
70146+ cachep = malloc_sizes[i].cs_usercopycachep;
70147+ else
70148+#endif
70149+
70150 cachep = malloc_sizes[i].cs_cachep;
70151
70152 ret = kmem_cache_alloc_trace(cachep, flags, size);
70153@@ -162,7 +174,7 @@ found:
70154 }
70155
70156 #ifdef CONFIG_NUMA
70157-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
70158+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
70159 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
70160
70161 #ifdef CONFIG_TRACING
70162@@ -205,6 +217,13 @@ found:
70163 cachep = malloc_sizes[i].cs_dmacachep;
70164 else
70165 #endif
70166+
70167+#ifdef CONFIG_PAX_USERCOPY_SLABS
70168+ if (flags & GFP_USERCOPY)
70169+ cachep = malloc_sizes[i].cs_usercopycachep;
70170+ else
70171+#endif
70172+
70173 cachep = malloc_sizes[i].cs_cachep;
70174
70175 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
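
cs_usercopycachep implements PAX_USERCOPY_SLABS: alongside the normal and DMA kmalloc caches, each size class gains a third cache reserved for objects whose contents legitimately cross the user/kernel boundary, selected with a GFP_USERCOPY flag defined elsewhere in this patch; check_heap_object(), declared in slab.h above, can then reject copy_to_user()/copy_from_user() spans that land in any other cache. At an allocation site this is a one-flag annotation (sketch):

/* sketch: buffer that will be filled from userland */
buf = kmalloc(len, GFP_KERNEL | GFP_USERCOPY);	/* served from cs_usercopycachep */
if (!buf)
	return -ENOMEM;
if (copy_from_user(buf, ubuf, len)) {		/* passes the USERCOPY slab check */
	kfree(buf);
	return -EFAULT;
}
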
70176diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
70177index f28e14a..7831211 100644
70178--- a/include/linux/slob_def.h
70179+++ b/include/linux/slob_def.h
70180@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
70181 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
70182 }
70183
70184-void *__kmalloc_node(size_t size, gfp_t flags, int node);
70185+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
70186
70187 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
70188 {
70189@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
70190 return __kmalloc_node(size, flags, NUMA_NO_NODE);
70191 }
70192
70193-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
70194+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
70195 {
70196 return kmalloc(size, flags);
70197 }
70198diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
70199index 9db4825..ed42fb5 100644
70200--- a/include/linux/slub_def.h
70201+++ b/include/linux/slub_def.h
70202@@ -91,7 +91,7 @@ struct kmem_cache {
70203 struct kmem_cache_order_objects max;
70204 struct kmem_cache_order_objects min;
70205 gfp_t allocflags; /* gfp flags to use on each alloc */
70206- int refcount; /* Refcount for slab cache destroy */
70207+ atomic_t refcount; /* Refcount for slab cache destroy */
70208 void (*ctor)(void *);
70209 int inuse; /* Offset to metadata */
70210 int align; /* Alignment */
70211@@ -156,7 +156,7 @@ extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];
70212 * Sorry that the following has to be that ugly but some versions of GCC
70213 * have trouble with constant propagation and loops.
70214 */
70215-static __always_inline int kmalloc_index(size_t size)
70216+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
70217 {
70218 if (!size)
70219 return 0;
70220@@ -221,7 +221,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
70221 }
70222
70223 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
70224-void *__kmalloc(size_t size, gfp_t flags);
70225+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
70226
70227 static __always_inline void *
70228 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
70229@@ -265,7 +265,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
70230 }
70231 #endif
70232
70233-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
70234+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
70235 {
70236 unsigned int order = get_order(size);
70237 return kmalloc_order_trace(size, flags, order);
70238@@ -290,7 +290,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
70239 }
70240
70241 #ifdef CONFIG_NUMA
70242-void *__kmalloc_node(size_t size, gfp_t flags, int node);
70243+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
70244 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
70245
70246 #ifdef CONFIG_TRACING
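
__size_overflow(1) marks argument 1 for the size_overflow gcc plugin, which recomputes the expressions feeding that argument in a wider type and reports if the value wrapped before reaching the allocator; __intentional_overflow(0), seen on alloc_skb() and skb_copy_datagram_iovec() earlier, is the per-function opt-out for arithmetic that wraps by design. The bug class it targets (sketch; count and size stand for untrusted 32-bit inputs):

/* sketch: if count * size wraps, the plugin's instrumentation reports the
 * overflow instead of letting a short buffer be allocated and overrun */
buf = __kmalloc(count * size, GFP_KERNEL);

/* the unannotated equivalent defense is an explicitly checked multiply,
 * using the kmalloc_array() helper visible in the slab.h hunk above */
buf = kmalloc_array(count, size, GFP_KERNEL);	/* returns NULL on overflow */
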
70247diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
70248index e8d702e..0a56eb4 100644
70249--- a/include/linux/sock_diag.h
70250+++ b/include/linux/sock_diag.h
70251@@ -10,7 +10,7 @@ struct sock;
70252 struct sock_diag_handler {
70253 __u8 family;
70254 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
70255-};
70256+} __do_const;
70257
70258 int sock_diag_register(const struct sock_diag_handler *h);
70259 void sock_diag_unregister(const struct sock_diag_handler *h);
70260diff --git a/include/linux/sonet.h b/include/linux/sonet.h
70261index 680f9a3..f13aeb0 100644
70262--- a/include/linux/sonet.h
70263+++ b/include/linux/sonet.h
70264@@ -7,7 +7,7 @@
70265 #include <uapi/linux/sonet.h>
70266
70267 struct k_sonet_stats {
70268-#define __HANDLE_ITEM(i) atomic_t i
70269+#define __HANDLE_ITEM(i) atomic_unchecked_t i
70270 __SONET_ITEMS
70271 #undef __HANDLE_ITEM
70272 };
70273diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
70274index 34206b8..3db7f1c 100644
70275--- a/include/linux/sunrpc/clnt.h
70276+++ b/include/linux/sunrpc/clnt.h
70277@@ -96,7 +96,7 @@ struct rpc_procinfo {
70278 unsigned int p_timer; /* Which RTT timer to use */
70279 u32 p_statidx; /* Which procedure to account */
70280 const char * p_name; /* name of procedure */
70281-};
70282+} __do_const;
70283
70284 #ifdef __KERNEL__
70285
70286@@ -176,9 +176,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
70287 {
70288 switch (sap->sa_family) {
70289 case AF_INET:
70290- return ntohs(((struct sockaddr_in *)sap)->sin_port);
70291+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
70292 case AF_INET6:
70293- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
70294+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
70295 }
70296 return 0;
70297 }
70298@@ -211,7 +211,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
70299 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
70300 const struct sockaddr *src)
70301 {
70302- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
70303+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
70304 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
70305
70306 dsin->sin_family = ssin->sin_family;
70307@@ -314,7 +314,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
70308 if (sa->sa_family != AF_INET6)
70309 return 0;
70310
70311- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
70312+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
70313 }
70314
70315 #endif /* __KERNEL__ */
70316diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
70317index 676ddf5..4c519a1 100644
70318--- a/include/linux/sunrpc/svc.h
70319+++ b/include/linux/sunrpc/svc.h
70320@@ -410,7 +410,7 @@ struct svc_procedure {
70321 unsigned int pc_count; /* call count */
70322 unsigned int pc_cachetype; /* cache info (NFS) */
70323 unsigned int pc_xdrressize; /* maximum size of XDR reply */
70324-};
70325+} __do_const;
70326
70327 /*
70328 * Function prototypes.
70329diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
70330index 0b8e3e6..33e0a01 100644
70331--- a/include/linux/sunrpc/svc_rdma.h
70332+++ b/include/linux/sunrpc/svc_rdma.h
70333@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
70334 extern unsigned int svcrdma_max_requests;
70335 extern unsigned int svcrdma_max_req_size;
70336
70337-extern atomic_t rdma_stat_recv;
70338-extern atomic_t rdma_stat_read;
70339-extern atomic_t rdma_stat_write;
70340-extern atomic_t rdma_stat_sq_starve;
70341-extern atomic_t rdma_stat_rq_starve;
70342-extern atomic_t rdma_stat_rq_poll;
70343-extern atomic_t rdma_stat_rq_prod;
70344-extern atomic_t rdma_stat_sq_poll;
70345-extern atomic_t rdma_stat_sq_prod;
70346+extern atomic_unchecked_t rdma_stat_recv;
70347+extern atomic_unchecked_t rdma_stat_read;
70348+extern atomic_unchecked_t rdma_stat_write;
70349+extern atomic_unchecked_t rdma_stat_sq_starve;
70350+extern atomic_unchecked_t rdma_stat_rq_starve;
70351+extern atomic_unchecked_t rdma_stat_rq_poll;
70352+extern atomic_unchecked_t rdma_stat_rq_prod;
70353+extern atomic_unchecked_t rdma_stat_sq_poll;
70354+extern atomic_unchecked_t rdma_stat_sq_prod;
70355
70356 #define RPCRDMA_VERSION 1
70357
70358diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
70359index dd74084a..7f509d5 100644
70360--- a/include/linux/sunrpc/svcauth.h
70361+++ b/include/linux/sunrpc/svcauth.h
70362@@ -109,7 +109,7 @@ struct auth_ops {
70363 int (*release)(struct svc_rqst *rq);
70364 void (*domain_release)(struct auth_domain *);
70365 int (*set_client)(struct svc_rqst *rq);
70366-};
70367+} __do_const;
70368
70369 #define SVC_GARBAGE 1
70370 #define SVC_SYSERR 2
70371diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
70372index 071d62c..4ccc7ac 100644
70373--- a/include/linux/swiotlb.h
70374+++ b/include/linux/swiotlb.h
70375@@ -59,7 +59,8 @@ extern void
70376
70377 extern void
70378 swiotlb_free_coherent(struct device *hwdev, size_t size,
70379- void *vaddr, dma_addr_t dma_handle);
70380+ void *vaddr, dma_addr_t dma_handle,
70381+ struct dma_attrs *attrs);
70382
70383 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
70384 unsigned long offset, size_t size,
70385diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
70386index 27b3b0b..e093dd9 100644
70387--- a/include/linux/syscore_ops.h
70388+++ b/include/linux/syscore_ops.h
70389@@ -16,7 +16,7 @@ struct syscore_ops {
70390 int (*suspend)(void);
70391 void (*resume)(void);
70392 void (*shutdown)(void);
70393-};
70394+} __do_const;
70395
70396 extern void register_syscore_ops(struct syscore_ops *ops);
70397 extern void unregister_syscore_ops(struct syscore_ops *ops);
70398diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
70399index 14a8ff2..af52bad 100644
70400--- a/include/linux/sysctl.h
70401+++ b/include/linux/sysctl.h
70402@@ -34,13 +34,13 @@ struct ctl_table_root;
70403 struct ctl_table_header;
70404 struct ctl_dir;
70405
70406-typedef struct ctl_table ctl_table;
70407-
70408 typedef int proc_handler (struct ctl_table *ctl, int write,
70409 void __user *buffer, size_t *lenp, loff_t *ppos);
70410
70411 extern int proc_dostring(struct ctl_table *, int,
70412 void __user *, size_t *, loff_t *);
70413+extern int proc_dostring_modpriv(struct ctl_table *, int,
70414+ void __user *, size_t *, loff_t *);
70415 extern int proc_dointvec(struct ctl_table *, int,
70416 void __user *, size_t *, loff_t *);
70417 extern int proc_dointvec_minmax(struct ctl_table *, int,
70418@@ -115,7 +115,9 @@ struct ctl_table
70419 struct ctl_table_poll *poll;
70420 void *extra1;
70421 void *extra2;
70422-};
70423+} __do_const;
70424+typedef struct ctl_table __no_const ctl_table_no_const;
70425+typedef struct ctl_table ctl_table;
70426
70427 struct ctl_node {
70428 struct rb_node node;
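The __do_const marking lets the constify plugin place statically defined sysctl tables in read-only memory, while the __no_const typedef ctl_table_no_const stays writable for the tables that must be built or patched at runtime (per-namespace tables, as the netns hunks later in this patch show). A hedged sketch of the resulting pattern; the helper and template names are hypothetical:

static const struct ctl_table example_template[] = {
        { .procname = "example", .maxlen = sizeof(int), .mode = 0644,
          .proc_handler = proc_dointvec },
        { }
};

static ctl_table_no_const *clone_example_table(void *data)
{
        ctl_table_no_const *t = kmemdup(example_template,
                                        sizeof(example_template), GFP_KERNEL);

        if (t)
                t[0].data = data;       /* runtime write: needs the no_const type */
        return t;
}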
70429diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
70430index 381f06d..dc16cc7 100644
70431--- a/include/linux/sysfs.h
70432+++ b/include/linux/sysfs.h
70433@@ -31,7 +31,8 @@ struct attribute {
70434 struct lock_class_key *key;
70435 struct lock_class_key skey;
70436 #endif
70437-};
70438+} __do_const;
70439+typedef struct attribute __no_const attribute_no_const;
70440
70441 /**
70442 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
70443@@ -59,8 +60,8 @@ struct attribute_group {
70444 umode_t (*is_visible)(struct kobject *,
70445 struct attribute *, int);
70446 struct attribute **attrs;
70447-};
70448-
70449+} __do_const;
70450+typedef struct attribute_group __no_const attribute_group_no_const;
70451
70452
70453 /**
70454@@ -107,7 +108,8 @@ struct bin_attribute {
70455 char *, loff_t, size_t);
70456 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
70457 struct vm_area_struct *vma);
70458-};
70459+} __do_const;
70460+typedef struct bin_attribute __no_const bin_attribute_no_const;
70461
70462 /**
70463 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
70464diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
70465index 7faf933..4657127 100644
70466--- a/include/linux/sysrq.h
70467+++ b/include/linux/sysrq.h
70468@@ -15,7 +15,9 @@
70469 #define _LINUX_SYSRQ_H
70470
70471 #include <linux/errno.h>
70472+#include <linux/compiler.h>
70473 #include <linux/types.h>
70474+#include <linux/compiler.h>
70475
70476 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
70477 #define SYSRQ_DEFAULT_ENABLE 1
70478@@ -36,7 +38,7 @@ struct sysrq_key_op {
70479 char *help_msg;
70480 char *action_msg;
70481 int enable_mask;
70482-};
70483+} __do_const;
70484
70485 #ifdef CONFIG_MAGIC_SYSRQ
70486
70487diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
70488index e7e0473..39b7b52 100644
70489--- a/include/linux/thread_info.h
70490+++ b/include/linux/thread_info.h
70491@@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
70492 #error "no set_restore_sigmask() provided and default one won't work"
70493 #endif
70494
70495+extern void __check_object_size(const void *ptr, unsigned long n, bool to);
70496+static inline void check_object_size(const void *ptr, unsigned long n, bool to)
70497+{
70498+#ifndef CONFIG_PAX_USERCOPY_DEBUG
70499+ if (!__builtin_constant_p(n))
70500+#endif
70501+ __check_object_size(ptr, n, to);
70502+}
70503+
70504 #endif /* __KERNEL__ */
70505
70506 #endif /* _LINUX_THREAD_INFO_H */
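check_object_size() is the PAX_USERCOPY entry point: for runtime-sized copies it has __check_object_size() verify that the range stays within a single heap or stack object, while constant-size copies skip the check (their bounds are already provable at compile time) unless CONFIG_PAX_USERCOPY_DEBUG forces checking everything. A sketch of how an arch usercopy path would call it; the wrapper name is illustrative, not from the patch:

static inline unsigned long
copy_to_user_checked(void __user *to, const void *from, unsigned long n)
{
        check_object_size(from, n, true);       /* true: kernel-to-user copy */
        return copy_to_user(to, from, n);
}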
70507diff --git a/include/linux/tty.h b/include/linux/tty.h
70508index 8db1b56..c16a040 100644
70509--- a/include/linux/tty.h
70510+++ b/include/linux/tty.h
70511@@ -194,7 +194,7 @@ struct tty_port {
70512 const struct tty_port_operations *ops; /* Port operations */
70513 spinlock_t lock; /* Lock protecting tty field */
70514 int blocked_open; /* Waiting to open */
70515- int count; /* Usage count */
70516+ atomic_t count; /* Usage count */
70517 wait_queue_head_t open_wait; /* Open waiters */
70518 wait_queue_head_t close_wait; /* Close waiters */
70519 wait_queue_head_t delta_msr_wait; /* Modem status change */
70520@@ -490,7 +490,7 @@ extern int tty_port_open(struct tty_port *port,
70521 struct tty_struct *tty, struct file *filp);
70522 static inline int tty_port_users(struct tty_port *port)
70523 {
70524- return port->count + port->blocked_open;
70525+ return atomic_read(&port->count) + port->blocked_open;
70526 }
70527
70528 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
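With tty_port.count retyped to atomic_t, the open and close paths update it with atomic operations instead of plain increments under the port lock; the rest of this patch converts the call sites to match. An illustrative fragment of that conversion:

/* was: port->count++; */
atomic_inc(&port->count);

/* was: if (--port->count == 0) { ... last close ... } */
if (atomic_dec_return(&port->count) == 0) {
        /* last user gone: proceed with port shutdown */
}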
70529diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
70530index dd976cf..e272742 100644
70531--- a/include/linux/tty_driver.h
70532+++ b/include/linux/tty_driver.h
70533@@ -284,7 +284,7 @@ struct tty_operations {
70534 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
70535 #endif
70536 const struct file_operations *proc_fops;
70537-};
70538+} __do_const;
70539
70540 struct tty_driver {
70541 int magic; /* magic number for this structure */
70542diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
70543index fb79dd8d..07d4773 100644
70544--- a/include/linux/tty_ldisc.h
70545+++ b/include/linux/tty_ldisc.h
70546@@ -149,7 +149,7 @@ struct tty_ldisc_ops {
70547
70548 struct module *owner;
70549
70550- int refcount;
70551+ atomic_t refcount;
70552 };
70553
70554 struct tty_ldisc {
70555diff --git a/include/linux/types.h b/include/linux/types.h
70556index 4d118ba..c3ee9bf 100644
70557--- a/include/linux/types.h
70558+++ b/include/linux/types.h
70559@@ -176,10 +176,26 @@ typedef struct {
70560 int counter;
70561 } atomic_t;
70562
70563+#ifdef CONFIG_PAX_REFCOUNT
70564+typedef struct {
70565+ int counter;
70566+} atomic_unchecked_t;
70567+#else
70568+typedef atomic_t atomic_unchecked_t;
70569+#endif
70570+
70571 #ifdef CONFIG_64BIT
70572 typedef struct {
70573 long counter;
70574 } atomic64_t;
70575+
70576+#ifdef CONFIG_PAX_REFCOUNT
70577+typedef struct {
70578+ long counter;
70579+} atomic64_unchecked_t;
70580+#else
70581+typedef atomic64_t atomic64_unchecked_t;
70582+#endif
70583 #endif
70584
70585 struct list_head {
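This is the core PAX_REFCOUNT split: with the option enabled, plain atomic_t operations gain overflow detection (a wrapping counter kills the offending task), so counters that may legitimately wrap are retyped to atomic_unchecked_t and use the *_unchecked operations, which skip the check. Without PAX_REFCOUNT the two types are identical. A minimal sketch of the intended division of labour:

static atomic_t obj_refs = ATOMIC_INIT(1);              /* refcount: must never wrap */
static atomic_unchecked_t rx_frames = ATOMIC_INIT(0);   /* statistic: free to wrap */

static void obj_get(void)  { atomic_inc(&obj_refs); }             /* overflow-checked */
static void frame_in(void) { atomic_inc_unchecked(&rx_frames); }  /* deliberately unchecked */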
70586diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
70587index 5ca0951..ab496a5 100644
70588--- a/include/linux/uaccess.h
70589+++ b/include/linux/uaccess.h
70590@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
70591 long ret; \
70592 mm_segment_t old_fs = get_fs(); \
70593 \
70594- set_fs(KERNEL_DS); \
70595 pagefault_disable(); \
70596- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
70597- pagefault_enable(); \
70598+ set_fs(KERNEL_DS); \
70599+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
70600 set_fs(old_fs); \
70601+ pagefault_enable(); \
70602 ret; \
70603 })
70604
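The reordering keeps the KERNEL_DS window strictly inside the pagefault-disabled region: the address limit is widened only once faults can no longer be serviced, and restored before they can be again, so no fault handler ever runs with the widened limit. The resulting pattern, extracted from the macro body above:

pagefault_disable();
set_fs(KERNEL_DS);                      /* widen limit only while faults are off */
ret = __copy_from_user_inatomic(&retval,
                (typeof(retval) __force_user *)addr, sizeof(retval));
set_fs(old_fs);                         /* restore before re-enabling faults */
pagefault_enable();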
70605diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
70606index 8e522cbc..aa8572d 100644
70607--- a/include/linux/uidgid.h
70608+++ b/include/linux/uidgid.h
70609@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
70610
70611 #endif /* CONFIG_USER_NS */
70612
70613+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
70614+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
70615+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
70616+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
70617+
70618 #endif /* _LINUX_UIDGID_H */
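These helpers translate namespaced kuid_t/kgid_t values back to global (init_user_ns) ids for grsecurity's logging and policy checks. A sketch of typical use; the log message is illustrative:

kuid_t kuid = current_uid();

if (gr_is_global_nonroot(kuid))
        printk(KERN_INFO "denied for global uid %u\n", GR_GLOBAL_UID(kuid));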
70619diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
70620index 99c1b4d..bb94261 100644
70621--- a/include/linux/unaligned/access_ok.h
70622+++ b/include/linux/unaligned/access_ok.h
70623@@ -6,32 +6,32 @@
70624
70625 static inline u16 get_unaligned_le16(const void *p)
70626 {
70627- return le16_to_cpup((__le16 *)p);
70628+ return le16_to_cpup((const __le16 *)p);
70629 }
70630
70631 static inline u32 get_unaligned_le32(const void *p)
70632 {
70633- return le32_to_cpup((__le32 *)p);
70634+ return le32_to_cpup((const __le32 *)p);
70635 }
70636
70637 static inline u64 get_unaligned_le64(const void *p)
70638 {
70639- return le64_to_cpup((__le64 *)p);
70640+ return le64_to_cpup((const __le64 *)p);
70641 }
70642
70643 static inline u16 get_unaligned_be16(const void *p)
70644 {
70645- return be16_to_cpup((__be16 *)p);
70646+ return be16_to_cpup((const __be16 *)p);
70647 }
70648
70649 static inline u32 get_unaligned_be32(const void *p)
70650 {
70651- return be32_to_cpup((__be32 *)p);
70652+ return be32_to_cpup((const __be32 *)p);
70653 }
70654
70655 static inline u64 get_unaligned_be64(const void *p)
70656 {
70657- return be64_to_cpup((__be64 *)p);
70658+ return be64_to_cpup((const __be64 *)p);
70659 }
70660
70661 static inline void put_unaligned_le16(u16 val, void *p)
70662diff --git a/include/linux/usb.h b/include/linux/usb.h
70663index 4d22d0f..ac43c2f 100644
70664--- a/include/linux/usb.h
70665+++ b/include/linux/usb.h
70666@@ -554,7 +554,7 @@ struct usb_device {
70667 int maxchild;
70668
70669 u32 quirks;
70670- atomic_t urbnum;
70671+ atomic_unchecked_t urbnum;
70672
70673 unsigned long active_duration;
70674
70675diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
70676index c5d36c6..108f4f9 100644
70677--- a/include/linux/usb/renesas_usbhs.h
70678+++ b/include/linux/usb/renesas_usbhs.h
70679@@ -39,7 +39,7 @@ enum {
70680 */
70681 struct renesas_usbhs_driver_callback {
70682 int (*notify_hotplug)(struct platform_device *pdev);
70683-};
70684+} __no_const;
70685
70686 /*
70687 * callback functions for platform
70688diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
70689index b9bd2e6..4ce0093 100644
70690--- a/include/linux/user_namespace.h
70691+++ b/include/linux/user_namespace.h
70692@@ -21,7 +21,7 @@ struct user_namespace {
70693 struct uid_gid_map uid_map;
70694 struct uid_gid_map gid_map;
70695 struct uid_gid_map projid_map;
70696- struct kref kref;
70697+ atomic_t count;
70698 struct user_namespace *parent;
70699 kuid_t owner;
70700 kgid_t group;
70701@@ -35,18 +35,18 @@ extern struct user_namespace init_user_ns;
70702 static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
70703 {
70704 if (ns)
70705- kref_get(&ns->kref);
70706+ atomic_inc(&ns->count);
70707 return ns;
70708 }
70709
70710 extern int create_user_ns(struct cred *new);
70711 extern int unshare_userns(unsigned long unshare_flags, struct cred **new_cred);
70712-extern void free_user_ns(struct kref *kref);
70713+extern void free_user_ns(struct user_namespace *ns);
70714
70715 static inline void put_user_ns(struct user_namespace *ns)
70716 {
70717- if (ns)
70718- kref_put(&ns->kref, free_user_ns);
70719+ if (ns && atomic_dec_and_test(&ns->count))
70720+ free_user_ns(ns);
70721 }
70722
70723 struct seq_operations;
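Replacing the kref with a bare atomic_t moves the release decision into put_user_ns() and lets free_user_ns() take the namespace directly. A sketch of the matching kernel/user_namespace.c side, simplified to the core of what such a conversion looks like (inum release and error handling omitted):

void free_user_ns(struct user_namespace *ns)
{
        struct user_namespace *parent = ns->parent;

        kmem_cache_free(user_ns_cachep, ns);
        put_user_ns(parent);            /* may cascade and free the parent too */
}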
70724diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
70725index 6f8fbcf..8259001 100644
70726--- a/include/linux/vermagic.h
70727+++ b/include/linux/vermagic.h
70728@@ -25,9 +25,35 @@
70729 #define MODULE_ARCH_VERMAGIC ""
70730 #endif
70731
70732+#ifdef CONFIG_PAX_REFCOUNT
70733+#define MODULE_PAX_REFCOUNT "REFCOUNT "
70734+#else
70735+#define MODULE_PAX_REFCOUNT ""
70736+#endif
70737+
70738+#ifdef CONSTIFY_PLUGIN
70739+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
70740+#else
70741+#define MODULE_CONSTIFY_PLUGIN ""
70742+#endif
70743+
70744+#ifdef STACKLEAK_PLUGIN
70745+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
70746+#else
70747+#define MODULE_STACKLEAK_PLUGIN ""
70748+#endif
70749+
70750+#ifdef CONFIG_GRKERNSEC
70751+#define MODULE_GRSEC "GRSEC "
70752+#else
70753+#define MODULE_GRSEC ""
70754+#endif
70755+
70756 #define VERMAGIC_STRING \
70757 UTS_RELEASE " " \
70758 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
70759 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
70760- MODULE_ARCH_VERMAGIC
70761+ MODULE_ARCH_VERMAGIC \
70762+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
70763+ MODULE_GRSEC
70764
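Module loading rejects any module whose vermagic string differs from the kernel's, so appending the hardening flags guarantees a module built without the same PaX/grsecurity configuration cannot be loaded. As an illustration, on a kernel with CONFIG_PAX_REFCOUNT and CONFIG_GRKERNSEC enabled but neither gcc plugin, VERMAGIC_STRING would expand to something like:

/* "3.8.2-grsec SMP mod_unload REFCOUNT GRSEC " */

Each flag macro carries its own trailing separator, so options that are disabled simply expand to the empty string and leave the rest of the string untouched.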
70765diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
70766index 6071e91..ca6a489 100644
70767--- a/include/linux/vmalloc.h
70768+++ b/include/linux/vmalloc.h
70769@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
70770 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
70771 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
70772 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
70773+
70774+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
70775+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
70776+#endif
70777+
70778 /* bits [20..32] reserved for arch specific ioremap internals */
70779
70780 /*
70781@@ -62,7 +67,7 @@ extern void *vmalloc_32_user(unsigned long size);
70782 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
70783 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
70784 unsigned long start, unsigned long end, gfp_t gfp_mask,
70785- pgprot_t prot, int node, const void *caller);
70786+ pgprot_t prot, int node, const void *caller) __size_overflow(1);
70787 extern void vfree(const void *addr);
70788
70789 extern void *vmap(struct page **pages, unsigned int count,
70790@@ -124,8 +129,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
70791 extern void free_vm_area(struct vm_struct *area);
70792
70793 /* for /dev/kmem */
70794-extern long vread(char *buf, char *addr, unsigned long count);
70795-extern long vwrite(char *buf, char *addr, unsigned long count);
70796+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
70797+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
70798
70799 /*
70800 * Internals. Dont't use..
70801diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
70802index a13291f..af51fa3 100644
70803--- a/include/linux/vmstat.h
70804+++ b/include/linux/vmstat.h
70805@@ -95,18 +95,18 @@ static inline void vm_events_fold_cpu(int cpu)
70806 /*
70807 * Zone based page accounting with per cpu differentials.
70808 */
70809-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70810+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70811
70812 static inline void zone_page_state_add(long x, struct zone *zone,
70813 enum zone_stat_item item)
70814 {
70815- atomic_long_add(x, &zone->vm_stat[item]);
70816- atomic_long_add(x, &vm_stat[item]);
70817+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
70818+ atomic_long_add_unchecked(x, &vm_stat[item]);
70819 }
70820
70821 static inline unsigned long global_page_state(enum zone_stat_item item)
70822 {
70823- long x = atomic_long_read(&vm_stat[item]);
70824+ long x = atomic_long_read_unchecked(&vm_stat[item]);
70825 #ifdef CONFIG_SMP
70826 if (x < 0)
70827 x = 0;
70828@@ -117,7 +117,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
70829 static inline unsigned long zone_page_state(struct zone *zone,
70830 enum zone_stat_item item)
70831 {
70832- long x = atomic_long_read(&zone->vm_stat[item]);
70833+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70834 #ifdef CONFIG_SMP
70835 if (x < 0)
70836 x = 0;
70837@@ -134,7 +134,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
70838 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
70839 enum zone_stat_item item)
70840 {
70841- long x = atomic_long_read(&zone->vm_stat[item]);
70842+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70843
70844 #ifdef CONFIG_SMP
70845 int cpu;
70846@@ -226,8 +226,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
70847
70848 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
70849 {
70850- atomic_long_inc(&zone->vm_stat[item]);
70851- atomic_long_inc(&vm_stat[item]);
70852+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
70853+ atomic_long_inc_unchecked(&vm_stat[item]);
70854 }
70855
70856 static inline void __inc_zone_page_state(struct page *page,
70857@@ -238,8 +238,8 @@ static inline void __inc_zone_page_state(struct page *page,
70858
70859 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
70860 {
70861- atomic_long_dec(&zone->vm_stat[item]);
70862- atomic_long_dec(&vm_stat[item]);
70863+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
70864+ atomic_long_dec_unchecked(&vm_stat[item]);
70865 }
70866
70867 static inline void __dec_zone_page_state(struct page *page,
70868diff --git a/include/linux/xattr.h b/include/linux/xattr.h
70869index fdbafc6..b7ffd47 100644
70870--- a/include/linux/xattr.h
70871+++ b/include/linux/xattr.h
70872@@ -28,7 +28,7 @@ struct xattr_handler {
70873 size_t size, int handler_flags);
70874 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
70875 size_t size, int flags, int handler_flags);
70876-};
70877+} __do_const;
70878
70879 struct xattr {
70880 char *name;
70881diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
70882index 95d1c91..6798cca 100644
70883--- a/include/media/v4l2-dev.h
70884+++ b/include/media/v4l2-dev.h
70885@@ -76,7 +76,7 @@ struct v4l2_file_operations {
70886 int (*mmap) (struct file *, struct vm_area_struct *);
70887 int (*open) (struct file *);
70888 int (*release) (struct file *);
70889-};
70890+} __do_const;
70891
70892 /*
70893 * Newer version of video_device, handled by videodev2.c
70894diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
70895index 4118ad1..cb7e25f 100644
70896--- a/include/media/v4l2-ioctl.h
70897+++ b/include/media/v4l2-ioctl.h
70898@@ -284,7 +284,6 @@ struct v4l2_ioctl_ops {
70899 bool valid_prio, int cmd, void *arg);
70900 };
70901
70902-
70903 /* v4l debugging and diagnostics */
70904
70905 /* Debug bitmask flags to be used on V4L2 */
70906diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
70907index adcbb20..62c2559 100644
70908--- a/include/net/9p/transport.h
70909+++ b/include/net/9p/transport.h
70910@@ -57,7 +57,7 @@ struct p9_trans_module {
70911 int (*cancel) (struct p9_client *, struct p9_req_t *req);
70912 int (*zc_request)(struct p9_client *, struct p9_req_t *,
70913 char *, char *, int , int, int, int);
70914-};
70915+} __do_const;
70916
70917 void v9fs_register_trans(struct p9_trans_module *m);
70918 void v9fs_unregister_trans(struct p9_trans_module *m);
70919diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
70920index 7588ef4..e62d35f 100644
70921--- a/include/net/bluetooth/l2cap.h
70922+++ b/include/net/bluetooth/l2cap.h
70923@@ -552,7 +552,7 @@ struct l2cap_ops {
70924 void (*defer) (struct l2cap_chan *chan);
70925 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
70926 unsigned long len, int nb);
70927-};
70928+} __do_const;
70929
70930 struct l2cap_conn {
70931 struct hci_conn *hcon;
70932diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
70933index 9e5425b..8136ffc 100644
70934--- a/include/net/caif/cfctrl.h
70935+++ b/include/net/caif/cfctrl.h
70936@@ -52,7 +52,7 @@ struct cfctrl_rsp {
70937 void (*radioset_rsp)(void);
70938 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
70939 struct cflayer *client_layer);
70940-};
70941+} __no_const;
70942
70943 /* Link Setup Parameters for CAIF-Links. */
70944 struct cfctrl_link_param {
70945@@ -101,8 +101,8 @@ struct cfctrl_request_info {
70946 struct cfctrl {
70947 struct cfsrvl serv;
70948 struct cfctrl_rsp res;
70949- atomic_t req_seq_no;
70950- atomic_t rsp_seq_no;
70951+ atomic_unchecked_t req_seq_no;
70952+ atomic_unchecked_t rsp_seq_no;
70953 struct list_head list;
70954 /* Protects from simultaneous access to first_req list */
70955 spinlock_t info_list_lock;
70956diff --git a/include/net/flow.h b/include/net/flow.h
70957index 628e11b..4c475df 100644
70958--- a/include/net/flow.h
70959+++ b/include/net/flow.h
70960@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
70961
70962 extern void flow_cache_flush(void);
70963 extern void flow_cache_flush_deferred(void);
70964-extern atomic_t flow_cache_genid;
70965+extern atomic_unchecked_t flow_cache_genid;
70966
70967 #endif
70968diff --git a/include/net/genetlink.h b/include/net/genetlink.h
70969index bdfbe68..4402ebe 100644
70970--- a/include/net/genetlink.h
70971+++ b/include/net/genetlink.h
70972@@ -118,7 +118,7 @@ struct genl_ops {
70973 struct netlink_callback *cb);
70974 int (*done)(struct netlink_callback *cb);
70975 struct list_head ops_list;
70976-};
70977+} __do_const;
70978
70979 extern int genl_register_family(struct genl_family *family);
70980 extern int genl_register_family_with_ops(struct genl_family *family,
70981diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
70982index e5062c9..48a9a4b 100644
70983--- a/include/net/gro_cells.h
70984+++ b/include/net/gro_cells.h
70985@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
70986 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
70987
70988 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
70989- atomic_long_inc(&dev->rx_dropped);
70990+ atomic_long_inc_unchecked(&dev->rx_dropped);
70991 kfree_skb(skb);
70992 return;
70993 }
70994@@ -73,8 +73,8 @@ static inline int gro_cells_init(struct gro_cells *gcells, struct net_device *de
70995 int i;
70996
70997 gcells->gro_cells_mask = roundup_pow_of_two(netif_get_num_default_rss_queues()) - 1;
70998- gcells->cells = kcalloc(sizeof(struct gro_cell),
70999- gcells->gro_cells_mask + 1,
71000+ gcells->cells = kcalloc(gcells->gro_cells_mask + 1,
71001+ sizeof(struct gro_cell),
71002 GFP_KERNEL);
71003 if (!gcells->cells)
71004 return -ENOMEM;
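This hunk is a plain bug fix: kcalloc's signature is kcalloc(n, size, flags), and the original call had the element count and element size transposed. The product is the same either way, but only the correct order lets kcalloc's built-in n * size overflow check guard the count argument:

/* prototype, for reference: */
void *kcalloc(size_t n, size_t size, gfp_t flags);

gcells->cells = kcalloc(gcells->gro_cells_mask + 1,     /* n: number of cells */
                        sizeof(struct gro_cell),        /* size: one cell */
                        GFP_KERNEL);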
71005diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
71006index 1832927..ce39aea 100644
71007--- a/include/net/inet_connection_sock.h
71008+++ b/include/net/inet_connection_sock.h
71009@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
71010 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
71011 int (*bind_conflict)(const struct sock *sk,
71012 const struct inet_bind_bucket *tb, bool relax);
71013-};
71014+} __do_const;
71015
71016 /** inet_connection_sock - INET connection oriented sock
71017 *
71018diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
71019index 53f464d..ba76aaa 100644
71020--- a/include/net/inetpeer.h
71021+++ b/include/net/inetpeer.h
71022@@ -47,8 +47,8 @@ struct inet_peer {
71023 */
71024 union {
71025 struct {
71026- atomic_t rid; /* Frag reception counter */
71027- atomic_t ip_id_count; /* IP ID for the next packet */
71028+ atomic_unchecked_t rid; /* Frag reception counter */
71029+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
71030 };
71031 struct rcu_head rcu;
71032 struct inet_peer *gc_next;
71033@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
71034 more++;
71035 inet_peer_refcheck(p);
71036 do {
71037- old = atomic_read(&p->ip_id_count);
71038+ old = atomic_read_unchecked(&p->ip_id_count);
71039 new = old + more;
71040 if (!new)
71041 new = 1;
71042- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
71043+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
71044 return new;
71045 }
71046
71047diff --git a/include/net/ip.h b/include/net/ip.h
71048index a68f838..74518ab 100644
71049--- a/include/net/ip.h
71050+++ b/include/net/ip.h
71051@@ -202,7 +202,7 @@ extern struct local_ports {
71052 } sysctl_local_ports;
71053 extern void inet_get_local_port_range(int *low, int *high);
71054
71055-extern unsigned long *sysctl_local_reserved_ports;
71056+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
71057 static inline int inet_is_reserved_local_port(int port)
71058 {
71059 return test_bit(port, sysctl_local_reserved_ports);
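Turning the reserved-ports bitmap from a runtime-allocated pointer into a fixed array makes its size visible to the compiler and the hardening plugins. The arithmetic: 65536 ports at one bit each is 65536 / 8 = 8192 bytes, i.e. 1024 unsigned longs on a 64-bit kernel. A compile-time sanity check (placed in any function, since BUILD_BUG_ON needs statement context) could assert this:

BUILD_BUG_ON(sizeof(sysctl_local_reserved_ports) != 65536 / 8);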
71060diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
71061index 9497be1..5a4fafe 100644
71062--- a/include/net/ip_fib.h
71063+++ b/include/net/ip_fib.h
71064@@ -169,7 +169,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
71065
71066 #define FIB_RES_SADDR(net, res) \
71067 ((FIB_RES_NH(res).nh_saddr_genid == \
71068- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
71069+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
71070 FIB_RES_NH(res).nh_saddr : \
71071 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
71072 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
71073diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
71074index 68c69d5..bdab192 100644
71075--- a/include/net/ip_vs.h
71076+++ b/include/net/ip_vs.h
71077@@ -599,7 +599,7 @@ struct ip_vs_conn {
71078 struct ip_vs_conn *control; /* Master control connection */
71079 atomic_t n_control; /* Number of controlled ones */
71080 struct ip_vs_dest *dest; /* real server */
71081- atomic_t in_pkts; /* incoming packet counter */
71082+ atomic_unchecked_t in_pkts; /* incoming packet counter */
71083
71084 /* packet transmitter for different forwarding methods. If it
71085 mangles the packet, it must return NF_DROP or better NF_STOLEN,
71086@@ -737,7 +737,7 @@ struct ip_vs_dest {
71087 __be16 port; /* port number of the server */
71088 union nf_inet_addr addr; /* IP address of the server */
71089 volatile unsigned int flags; /* dest status flags */
71090- atomic_t conn_flags; /* flags to copy to conn */
71091+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
71092 atomic_t weight; /* server weight */
71093
71094 atomic_t refcnt; /* reference counter */
71095@@ -980,11 +980,11 @@ struct netns_ipvs {
71096 /* ip_vs_lblc */
71097 int sysctl_lblc_expiration;
71098 struct ctl_table_header *lblc_ctl_header;
71099- struct ctl_table *lblc_ctl_table;
71100+ ctl_table_no_const *lblc_ctl_table;
71101 /* ip_vs_lblcr */
71102 int sysctl_lblcr_expiration;
71103 struct ctl_table_header *lblcr_ctl_header;
71104- struct ctl_table *lblcr_ctl_table;
71105+ ctl_table_no_const *lblcr_ctl_table;
71106 /* ip_vs_est */
71107 struct list_head est_list; /* estimator list */
71108 spinlock_t est_lock;
71109diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
71110index 80ffde3..968b0f4 100644
71111--- a/include/net/irda/ircomm_tty.h
71112+++ b/include/net/irda/ircomm_tty.h
71113@@ -35,6 +35,7 @@
71114 #include <linux/termios.h>
71115 #include <linux/timer.h>
71116 #include <linux/tty.h> /* struct tty_struct */
71117+#include <asm/local.h>
71118
71119 #include <net/irda/irias_object.h>
71120 #include <net/irda/ircomm_core.h>
71121diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
71122index cc7c197..9f2da2a 100644
71123--- a/include/net/iucv/af_iucv.h
71124+++ b/include/net/iucv/af_iucv.h
71125@@ -141,7 +141,7 @@ struct iucv_sock {
71126 struct iucv_sock_list {
71127 struct hlist_head head;
71128 rwlock_t lock;
71129- atomic_t autobind_name;
71130+ atomic_unchecked_t autobind_name;
71131 };
71132
71133 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
71134diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
71135index df83f69..9b640b8 100644
71136--- a/include/net/llc_c_ac.h
71137+++ b/include/net/llc_c_ac.h
71138@@ -87,7 +87,7 @@
71139 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
71140 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
71141
71142-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
71143+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
71144
71145 extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
71146 extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
71147diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
71148index 6ca3113..f8026dd 100644
71149--- a/include/net/llc_c_ev.h
71150+++ b/include/net/llc_c_ev.h
71151@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
71152 return (struct llc_conn_state_ev *)skb->cb;
71153 }
71154
71155-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
71156-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
71157+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
71158+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
71159
71160 extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
71161 extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
71162diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
71163index 0e79cfb..f46db31 100644
71164--- a/include/net/llc_c_st.h
71165+++ b/include/net/llc_c_st.h
71166@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
71167 u8 next_state;
71168 llc_conn_ev_qfyr_t *ev_qualifiers;
71169 llc_conn_action_t *ev_actions;
71170-};
71171+} __do_const;
71172
71173 struct llc_conn_state {
71174 u8 current_state;
71175diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
71176index 37a3bbd..55a4241 100644
71177--- a/include/net/llc_s_ac.h
71178+++ b/include/net/llc_s_ac.h
71179@@ -23,7 +23,7 @@
71180 #define SAP_ACT_TEST_IND 9
71181
71182 /* All action functions must look like this */
71183-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
71184+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
71185
71186 extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
71187 struct sk_buff *skb);
71188diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
71189index 567c681..cd73ac0 100644
71190--- a/include/net/llc_s_st.h
71191+++ b/include/net/llc_s_st.h
71192@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
71193 llc_sap_ev_t ev;
71194 u8 next_state;
71195 llc_sap_action_t *ev_actions;
71196-};
71197+} __do_const;
71198
71199 struct llc_sap_state {
71200 u8 curr_state;
71201diff --git a/include/net/mac80211.h b/include/net/mac80211.h
71202index ee50c5e..1bc3b1a 100644
71203--- a/include/net/mac80211.h
71204+++ b/include/net/mac80211.h
71205@@ -3996,7 +3996,7 @@ struct rate_control_ops {
71206 void (*add_sta_debugfs)(void *priv, void *priv_sta,
71207 struct dentry *dir);
71208 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
71209-};
71210+} __do_const;
71211
71212 static inline int rate_supported(struct ieee80211_sta *sta,
71213 enum ieee80211_band band,
71214diff --git a/include/net/neighbour.h b/include/net/neighbour.h
71215index 0dab173..1b76af0 100644
71216--- a/include/net/neighbour.h
71217+++ b/include/net/neighbour.h
71218@@ -123,7 +123,7 @@ struct neigh_ops {
71219 void (*error_report)(struct neighbour *, struct sk_buff *);
71220 int (*output)(struct neighbour *, struct sk_buff *);
71221 int (*connected_output)(struct neighbour *, struct sk_buff *);
71222-};
71223+} __do_const;
71224
71225 struct pneigh_entry {
71226 struct pneigh_entry *next;
71227diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
71228index de644bc..351fd4e 100644
71229--- a/include/net/net_namespace.h
71230+++ b/include/net/net_namespace.h
71231@@ -115,7 +115,7 @@ struct net {
71232 #endif
71233 struct netns_ipvs *ipvs;
71234 struct sock *diag_nlsk;
71235- atomic_t rt_genid;
71236+ atomic_unchecked_t rt_genid;
71237 };
71238
71239 /*
71240@@ -282,7 +282,7 @@ struct pernet_operations {
71241 void (*exit_batch)(struct list_head *net_exit_list);
71242 int *id;
71243 size_t size;
71244-};
71245+} __do_const;
71246
71247 /*
71248 * Use these carefully. If you implement a network device and it
71249@@ -330,12 +330,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
71250
71251 static inline int rt_genid(struct net *net)
71252 {
71253- return atomic_read(&net->rt_genid);
71254+ return atomic_read_unchecked(&net->rt_genid);
71255 }
71256
71257 static inline void rt_genid_bump(struct net *net)
71258 {
71259- atomic_inc(&net->rt_genid);
71260+ atomic_inc_unchecked(&net->rt_genid);
71261 }
71262
71263 #endif /* __NET_NET_NAMESPACE_H */
71264diff --git a/include/net/netdma.h b/include/net/netdma.h
71265index 8ba8ce2..99b7fff 100644
71266--- a/include/net/netdma.h
71267+++ b/include/net/netdma.h
71268@@ -24,7 +24,7 @@
71269 #include <linux/dmaengine.h>
71270 #include <linux/skbuff.h>
71271
71272-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
71273+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
71274 struct sk_buff *skb, int offset, struct iovec *to,
71275 size_t len, struct dma_pinned_list *pinned_list);
71276
71277diff --git a/include/net/netlink.h b/include/net/netlink.h
71278index 9690b0f..87aded7 100644
71279--- a/include/net/netlink.h
71280+++ b/include/net/netlink.h
71281@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
71282 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
71283 {
71284 if (mark)
71285- skb_trim(skb, (unsigned char *) mark - skb->data);
71286+ skb_trim(skb, (const unsigned char *) mark - skb->data);
71287 }
71288
71289 /**
71290diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
71291index 923cb20..deae816 100644
71292--- a/include/net/netns/conntrack.h
71293+++ b/include/net/netns/conntrack.h
71294@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
71295 struct nf_proto_net {
71296 #ifdef CONFIG_SYSCTL
71297 struct ctl_table_header *ctl_table_header;
71298- struct ctl_table *ctl_table;
71299+ ctl_table_no_const *ctl_table;
71300 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
71301 struct ctl_table_header *ctl_compat_header;
71302- struct ctl_table *ctl_compat_table;
71303+ ctl_table_no_const *ctl_compat_table;
71304 #endif
71305 #endif
71306 unsigned int users;
71307@@ -58,7 +58,7 @@ struct nf_ip_net {
71308 struct nf_icmp_net icmpv6;
71309 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
71310 struct ctl_table_header *ctl_table_header;
71311- struct ctl_table *ctl_table;
71312+ ctl_table_no_const *ctl_table;
71313 #endif
71314 };
71315
71316diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
71317index 2ae2b83..dbdc85e 100644
71318--- a/include/net/netns/ipv4.h
71319+++ b/include/net/netns/ipv4.h
71320@@ -64,7 +64,7 @@ struct netns_ipv4 {
71321 kgid_t sysctl_ping_group_range[2];
71322 long sysctl_tcp_mem[3];
71323
71324- atomic_t dev_addr_genid;
71325+ atomic_unchecked_t dev_addr_genid;
71326
71327 #ifdef CONFIG_IP_MROUTE
71328 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
71329diff --git a/include/net/protocol.h b/include/net/protocol.h
71330index 047c047..b9dad15 100644
71331--- a/include/net/protocol.h
71332+++ b/include/net/protocol.h
71333@@ -44,7 +44,7 @@ struct net_protocol {
71334 void (*err_handler)(struct sk_buff *skb, u32 info);
71335 unsigned int no_policy:1,
71336 netns_ok:1;
71337-};
71338+} __do_const;
71339
71340 #if IS_ENABLED(CONFIG_IPV6)
71341 struct inet6_protocol {
71342@@ -57,7 +57,7 @@ struct inet6_protocol {
71343 u8 type, u8 code, int offset,
71344 __be32 info);
71345 unsigned int flags; /* INET6_PROTO_xxx */
71346-};
71347+} __do_const;
71348
71349 #define INET6_PROTO_NOPOLICY 0x1
71350 #define INET6_PROTO_FINAL 0x2
71351diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
71352index 5a15fab..d799ea7 100644
71353--- a/include/net/rtnetlink.h
71354+++ b/include/net/rtnetlink.h
71355@@ -81,7 +81,7 @@ struct rtnl_link_ops {
71356 const struct net_device *dev);
71357 unsigned int (*get_num_tx_queues)(void);
71358 unsigned int (*get_num_rx_queues)(void);
71359-};
71360+} __do_const;
71361
71362 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
71363 extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
71364diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
71365index 7fdf298..197e9f7 100644
71366--- a/include/net/sctp/sctp.h
71367+++ b/include/net/sctp/sctp.h
71368@@ -330,9 +330,9 @@ do { \
71369
71370 #else /* SCTP_DEBUG */
71371
71372-#define SCTP_DEBUG_PRINTK(whatever...)
71373-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
71374-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
71375+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
71376+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
71377+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
71378 #define SCTP_ENABLE_DEBUG
71379 #define SCTP_DISABLE_DEBUG
71380 #define SCTP_ASSERT(expr, str, func)
71381diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
71382index 2a82d13..62a31c2 100644
71383--- a/include/net/sctp/sm.h
71384+++ b/include/net/sctp/sm.h
71385@@ -87,7 +87,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
71386 typedef struct {
71387 sctp_state_fn_t *fn;
71388 const char *name;
71389-} sctp_sm_table_entry_t;
71390+} __do_const sctp_sm_table_entry_t;
71391
71392 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
71393 * currently in use.
71394@@ -299,7 +299,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
71395 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
71396
71397 /* Extern declarations for major data structures. */
71398-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
71399+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
71400
71401
71402 /* Get the size of a DATA chunk payload. */
71403diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
71404index fdeb85a..1329d95 100644
71405--- a/include/net/sctp/structs.h
71406+++ b/include/net/sctp/structs.h
71407@@ -517,7 +517,7 @@ struct sctp_pf {
71408 struct sctp_association *asoc);
71409 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
71410 struct sctp_af *af;
71411-};
71412+} __do_const;
71413
71414
71415 /* Structure to track chunk fragments that have been acked, but peer
71416diff --git a/include/net/sock.h b/include/net/sock.h
71417index 25afaa0..8bb0070 100644
71418--- a/include/net/sock.h
71419+++ b/include/net/sock.h
71420@@ -322,7 +322,7 @@ struct sock {
71421 #ifdef CONFIG_RPS
71422 __u32 sk_rxhash;
71423 #endif
71424- atomic_t sk_drops;
71425+ atomic_unchecked_t sk_drops;
71426 int sk_rcvbuf;
71427
71428 struct sk_filter __rcu *sk_filter;
71429@@ -1781,7 +1781,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
71430 }
71431
71432 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
71433- char __user *from, char *to,
71434+ char __user *from, unsigned char *to,
71435 int copy, int offset)
71436 {
71437 if (skb->ip_summed == CHECKSUM_NONE) {
71438@@ -2040,7 +2040,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
71439 }
71440 }
71441
71442-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
71443+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
71444
71445 /**
71446 * sk_page_frag - return an appropriate page_frag
71447diff --git a/include/net/tcp.h b/include/net/tcp.h
71448index aed42c7..43890c6 100644
71449--- a/include/net/tcp.h
71450+++ b/include/net/tcp.h
71451@@ -530,7 +530,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
71452 extern void tcp_xmit_retransmit_queue(struct sock *);
71453 extern void tcp_simple_retransmit(struct sock *);
71454 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
71455-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
71456+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
71457
71458 extern void tcp_send_probe0(struct sock *);
71459 extern void tcp_send_partial(struct sock *);
71460@@ -701,8 +701,8 @@ struct tcp_skb_cb {
71461 struct inet6_skb_parm h6;
71462 #endif
71463 } header; /* For incoming frames */
71464- __u32 seq; /* Starting sequence number */
71465- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
71466+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
71467+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
71468 __u32 when; /* used to compute rtt's */
71469 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
71470
71471@@ -716,7 +716,7 @@ struct tcp_skb_cb {
71472
71473 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
71474 /* 1 byte hole */
71475- __u32 ack_seq; /* Sequence number ACK'd */
71476+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
71477 };
71478
71479 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
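The __intentional_overflow(0) markers tell the size_overflow plugin not to instrument these fields: TCP sequence arithmetic is modulo 2^32 by design. The kernel's own before() comparison, already in this header, only works because of that wraparound:

static inline bool before(__u32 seq1, __u32 seq2)
{
        return (__s32)(seq1 - seq2) < 0;        /* correct across the 2^32 wrap */
}

Trapping on overflow here would break every long-lived TCP connection, hence the explicit opt-out.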
71480diff --git a/include/net/xfrm.h b/include/net/xfrm.h
71481index 63445ed..d6fc34f 100644
71482--- a/include/net/xfrm.h
71483+++ b/include/net/xfrm.h
71484@@ -304,7 +304,7 @@ struct xfrm_policy_afinfo {
71485 struct net_device *dev,
71486 const struct flowi *fl);
71487 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
71488-};
71489+} __do_const;
71490
71491 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
71492 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
71493@@ -340,7 +340,7 @@ struct xfrm_state_afinfo {
71494 struct sk_buff *skb);
71495 int (*transport_finish)(struct sk_buff *skb,
71496 int async);
71497-};
71498+} __do_const;
71499
71500 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
71501 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
71502@@ -423,7 +423,7 @@ struct xfrm_mode {
71503 struct module *owner;
71504 unsigned int encap;
71505 int flags;
71506-};
71507+} __do_const;
71508
71509 /* Flags for xfrm_mode. */
71510 enum {
71511@@ -514,7 +514,7 @@ struct xfrm_policy {
71512 struct timer_list timer;
71513
71514 struct flow_cache_object flo;
71515- atomic_t genid;
71516+ atomic_unchecked_t genid;
71517 u32 priority;
71518 u32 index;
71519 struct xfrm_mark mark;
71520diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
71521index 1a046b1..ee0bef0 100644
71522--- a/include/rdma/iw_cm.h
71523+++ b/include/rdma/iw_cm.h
71524@@ -122,7 +122,7 @@ struct iw_cm_verbs {
71525 int backlog);
71526
71527 int (*destroy_listen)(struct iw_cm_id *cm_id);
71528-};
71529+} __no_const;
71530
71531 /**
71532 * iw_create_cm_id - Create an IW CM identifier.
71533diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
71534index 399162b..b337f1a 100644
71535--- a/include/scsi/libfc.h
71536+++ b/include/scsi/libfc.h
71537@@ -762,6 +762,7 @@ struct libfc_function_template {
71538 */
71539 void (*disc_stop_final) (struct fc_lport *);
71540 };
71541+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
71542
71543 /**
71544 * struct fc_disc - Discovery context
71545@@ -866,7 +867,7 @@ struct fc_lport {
71546 struct fc_vport *vport;
71547
71548 /* Operational Information */
71549- struct libfc_function_template tt;
71550+ libfc_function_template_no_const tt;
71551 u8 link_up;
71552 u8 qfull;
71553 enum fc_lport_state state;
71554diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
71555index e65c62e..aa2e5a2 100644
71556--- a/include/scsi/scsi_device.h
71557+++ b/include/scsi/scsi_device.h
71558@@ -170,9 +170,9 @@ struct scsi_device {
71559 unsigned int max_device_blocked; /* what device_blocked counts down from */
71560 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
71561
71562- atomic_t iorequest_cnt;
71563- atomic_t iodone_cnt;
71564- atomic_t ioerr_cnt;
71565+ atomic_unchecked_t iorequest_cnt;
71566+ atomic_unchecked_t iodone_cnt;
71567+ atomic_unchecked_t ioerr_cnt;
71568
71569 struct device sdev_gendev,
71570 sdev_dev;
71571diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
71572index b797e8f..8e2c3aa 100644
71573--- a/include/scsi/scsi_transport_fc.h
71574+++ b/include/scsi/scsi_transport_fc.h
71575@@ -751,7 +751,8 @@ struct fc_function_template {
71576 unsigned long show_host_system_hostname:1;
71577
71578 unsigned long disable_target_scan:1;
71579-};
71580+} __do_const;
71581+typedef struct fc_function_template __no_const fc_function_template_no_const;
71582
71583
71584 /**
71585diff --git a/include/sound/soc.h b/include/sound/soc.h
71586index bc56738..a4be132 100644
71587--- a/include/sound/soc.h
71588+++ b/include/sound/soc.h
71589@@ -771,7 +771,7 @@ struct snd_soc_codec_driver {
71590 /* probe ordering - for components with runtime dependencies */
71591 int probe_order;
71592 int remove_order;
71593-};
71594+} __do_const;
71595
71596 /* SoC platform interface */
71597 struct snd_soc_platform_driver {
71598@@ -817,7 +817,7 @@ struct snd_soc_platform_driver {
71599 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
71600 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
71601 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
71602-};
71603+} __do_const;
71604
71605 struct snd_soc_platform {
71606 const char *name;
71607diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
71608index 663e34a..91b306a 100644
71609--- a/include/target/target_core_base.h
71610+++ b/include/target/target_core_base.h
71611@@ -654,7 +654,7 @@ struct se_device {
71612 spinlock_t stats_lock;
71613 /* Active commands on this virtual SE device */
71614 atomic_t simple_cmds;
71615- atomic_t dev_ordered_id;
71616+ atomic_unchecked_t dev_ordered_id;
71617 atomic_t dev_ordered_sync;
71618 atomic_t dev_qf_count;
71619 int export_count;
71620diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
71621new file mode 100644
71622index 0000000..fb634b7
71623--- /dev/null
71624+++ b/include/trace/events/fs.h
71625@@ -0,0 +1,53 @@
71626+#undef TRACE_SYSTEM
71627+#define TRACE_SYSTEM fs
71628+
71629+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
71630+#define _TRACE_FS_H
71631+
71632+#include <linux/fs.h>
71633+#include <linux/tracepoint.h>
71634+
71635+TRACE_EVENT(do_sys_open,
71636+
71637+ TP_PROTO(const char *filename, int flags, int mode),
71638+
71639+ TP_ARGS(filename, flags, mode),
71640+
71641+ TP_STRUCT__entry(
71642+ __string( filename, filename )
71643+ __field( int, flags )
71644+ __field( int, mode )
71645+ ),
71646+
71647+ TP_fast_assign(
71648+ __assign_str(filename, filename);
71649+ __entry->flags = flags;
71650+ __entry->mode = mode;
71651+ ),
71652+
71653+ TP_printk("\"%s\" %x %o",
71654+ __get_str(filename), __entry->flags, __entry->mode)
71655+);
71656+
71657+TRACE_EVENT(open_exec,
71658+
71659+ TP_PROTO(const char *filename),
71660+
71661+ TP_ARGS(filename),
71662+
71663+ TP_STRUCT__entry(
71664+ __string( filename, filename )
71665+ ),
71666+
71667+ TP_fast_assign(
71668+ __assign_str(filename, filename);
71669+ ),
71670+
71671+ TP_printk("\"%s\"",
71672+ __get_str(filename))
71673+);
71674+
71675+#endif /* _TRACE_FS_H */
71676+
71677+/* This part must be outside protection */
71678+#include <trace/define_trace.h>
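The new header only defines the two events; the corresponding hooks are added at the call sites elsewhere in this patch. Firing them looks like this (sketch of the call-site shape):

/* in do_sys_open(), after getname() succeeds: */
trace_do_sys_open(tmp->name, flags, mode);

/* in open_exec(): */
trace_open_exec(name);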
71679diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
71680index 1c09820..7f5ec79 100644
71681--- a/include/trace/events/irq.h
71682+++ b/include/trace/events/irq.h
71683@@ -36,7 +36,7 @@ struct softirq_action;
71684 */
71685 TRACE_EVENT(irq_handler_entry,
71686
71687- TP_PROTO(int irq, struct irqaction *action),
71688+ TP_PROTO(int irq, const struct irqaction *action),
71689
71690 TP_ARGS(irq, action),
71691
71692@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
71693 */
71694 TRACE_EVENT(irq_handler_exit,
71695
71696- TP_PROTO(int irq, struct irqaction *action, int ret),
71697+ TP_PROTO(int irq, const struct irqaction *action, int ret),
71698
71699 TP_ARGS(irq, action, ret),
71700
71701diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
71702index 7caf44c..23c6f27 100644
71703--- a/include/uapi/linux/a.out.h
71704+++ b/include/uapi/linux/a.out.h
71705@@ -39,6 +39,14 @@ enum machine_type {
71706 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
71707 };
71708
71709+/* Constants for the N_FLAGS field */
71710+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
71711+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
71712+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
71713+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
71714+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
71715+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
71716+
71717 #if !defined (N_MAGIC)
71718 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
71719 #endif
71720diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
71721index d876736..b36014e 100644
71722--- a/include/uapi/linux/byteorder/little_endian.h
71723+++ b/include/uapi/linux/byteorder/little_endian.h
71724@@ -42,51 +42,51 @@
71725
71726 static inline __le64 __cpu_to_le64p(const __u64 *p)
71727 {
71728- return (__force __le64)*p;
71729+ return (__force const __le64)*p;
71730 }
71731 static inline __u64 __le64_to_cpup(const __le64 *p)
71732 {
71733- return (__force __u64)*p;
71734+ return (__force const __u64)*p;
71735 }
71736 static inline __le32 __cpu_to_le32p(const __u32 *p)
71737 {
71738- return (__force __le32)*p;
71739+ return (__force const __le32)*p;
71740 }
71741 static inline __u32 __le32_to_cpup(const __le32 *p)
71742 {
71743- return (__force __u32)*p;
71744+ return (__force const __u32)*p;
71745 }
71746 static inline __le16 __cpu_to_le16p(const __u16 *p)
71747 {
71748- return (__force __le16)*p;
71749+ return (__force const __le16)*p;
71750 }
71751 static inline __u16 __le16_to_cpup(const __le16 *p)
71752 {
71753- return (__force __u16)*p;
71754+ return (__force const __u16)*p;
71755 }
71756 static inline __be64 __cpu_to_be64p(const __u64 *p)
71757 {
71758- return (__force __be64)__swab64p(p);
71759+ return (__force const __be64)__swab64p(p);
71760 }
71761 static inline __u64 __be64_to_cpup(const __be64 *p)
71762 {
71763- return __swab64p((__u64 *)p);
71764+ return __swab64p((const __u64 *)p);
71765 }
71766 static inline __be32 __cpu_to_be32p(const __u32 *p)
71767 {
71768- return (__force __be32)__swab32p(p);
71769+ return (__force const __be32)__swab32p(p);
71770 }
71771 static inline __u32 __be32_to_cpup(const __be32 *p)
71772 {
71773- return __swab32p((__u32 *)p);
71774+ return __swab32p((const __u32 *)p);
71775 }
71776 static inline __be16 __cpu_to_be16p(const __u16 *p)
71777 {
71778- return (__force __be16)__swab16p(p);
71779+ return (__force const __be16)__swab16p(p);
71780 }
71781 static inline __u16 __be16_to_cpup(const __be16 *p)
71782 {
71783- return __swab16p((__u16 *)p);
71784+ return __swab16p((const __u16 *)p);
71785 }
71786 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
71787 #define __le64_to_cpus(x) do { (void)(x); } while (0)
71788diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
71789index 126a817..d522bd1 100644
71790--- a/include/uapi/linux/elf.h
71791+++ b/include/uapi/linux/elf.h
71792@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
71793 #define PT_GNU_EH_FRAME 0x6474e550
71794
71795 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
71796+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
71797+
71798+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
71799+
71800+/* Constants for the e_flags field */
71801+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
71802+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
71803+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
71804+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
71805+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
71806+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
71807
71808 /*
71809 * Extended Numbering
71810@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
71811 #define DT_DEBUG 21
71812 #define DT_TEXTREL 22
71813 #define DT_JMPREL 23
71814+#define DT_FLAGS 30
71815+ #define DF_TEXTREL 0x00000004
71816 #define DT_ENCODING 32
71817 #define OLD_DT_LOOS 0x60000000
71818 #define DT_LOOS 0x6000000d
71819@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
71820 #define PF_W 0x2
71821 #define PF_X 0x1
71822
71823+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
71824+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
71825+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
71826+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
71827+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
71828+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
71829+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
71830+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
71831+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
71832+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
71833+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
71834+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
71835+
71836 typedef struct elf32_phdr{
71837 Elf32_Word p_type;
71838 Elf32_Off p_offset;
71839@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
71840 #define EI_OSABI 7
71841 #define EI_PAD 8
71842
71843+#define EI_PAX 14
71844+
71845 #define ELFMAG0 0x7f /* EI_MAG */
71846 #define ELFMAG1 'E'
71847 #define ELFMAG2 'L'
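
The PT_PAX_FLAGS program header and its PF_* bits can be inspected from plain userspace. A minimal reader, assuming a 64-bit ELF matching the host's endianness; the real semantics are three-valued (flag set = force on, NO-flag set = force off, neither = the tool's default), which this sketch simplifies:

#include <elf.h>
#include <stdio.h>

#ifndef PT_PAX_FLAGS
#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
#endif
#define PF_NOPAGEEXEC (1U << 5)
#define PF_NOMPROTECT (1U << 9)

int main(int argc, char **argv)
{
    Elf64_Ehdr eh;
    FILE *f;

    if (argc < 2 || !(f = fopen(argv[1], "rb")))
        return 1;
    if (fread(&eh, sizeof(eh), 1, f) != 1 ||
        fseek(f, (long)eh.e_phoff, SEEK_SET) != 0)
        return 1;
    for (int i = 0; i < eh.e_phnum; i++) {
        Elf64_Phdr ph;
        if (fread(&ph, sizeof(ph), 1, f) != 1)
            return 1;
        if (ph.p_type == PT_PAX_FLAGS)
            printf("pax flags: %#x (PAGEEXEC %s, MPROTECT %s)\n",
                   ph.p_flags,
                   ph.p_flags & PF_NOPAGEEXEC ? "off" : "on",
                   ph.p_flags & PF_NOMPROTECT ? "off" : "on");
    }
    fclose(f);
    return 0;
}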
71848diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
71849index aa169c4..6a2771d 100644
71850--- a/include/uapi/linux/personality.h
71851+++ b/include/uapi/linux/personality.h
71852@@ -30,6 +30,7 @@ enum {
71853 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
71854 ADDR_NO_RANDOMIZE | \
71855 ADDR_COMPAT_LAYOUT | \
71856+ ADDR_LIMIT_3GB | \
71857 MMAP_PAGE_ZERO)
71858
71859 /*
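
Adding ADDR_LIMIT_3GB to PER_CLEAR_ON_SETID means that personality bit is now also stripped when a set-ID binary is executed. For reference, how such bits are set and queried from userspace (0xffffffff is the query idiom):

#include <stdio.h>
#include <sys/personality.h>

int main(void)
{
    int old = personality(0xffffffff);     /* query, change nothing */
    personality(old | ADDR_NO_RANDOMIZE);  /* another PER_CLEAR_ON_SETID bit */
    printf("persona: %#x\n", personality(0xffffffff));
    return 0;
}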
71860diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
71861index 7530e74..e714828 100644
71862--- a/include/uapi/linux/screen_info.h
71863+++ b/include/uapi/linux/screen_info.h
71864@@ -43,7 +43,8 @@ struct screen_info {
71865 __u16 pages; /* 0x32 */
71866 __u16 vesa_attributes; /* 0x34 */
71867 __u32 capabilities; /* 0x36 */
71868- __u8 _reserved[6]; /* 0x3a */
71869+ __u16 vesapm_size; /* 0x3a */
71870+ __u8 _reserved[4]; /* 0x3c */
71871 } __attribute__((packed));
71872
71873 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
71874diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
71875index 6d67213..8dab561 100644
71876--- a/include/uapi/linux/sysctl.h
71877+++ b/include/uapi/linux/sysctl.h
71878@@ -155,7 +155,11 @@ enum
71879 	KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered NMI */
71880 };
71881
71882-
71883+#ifdef CONFIG_PAX_SOFTMODE
71884+enum {
71885+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
71886+};
71887+#endif
71888
71889 /* CTL_VM names: */
71890 enum
71891diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
71892index 26607bd..588b65f 100644
71893--- a/include/uapi/linux/xattr.h
71894+++ b/include/uapi/linux/xattr.h
71895@@ -60,5 +60,9 @@
71896 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
71897 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
71898
71899+/* User namespace */
71900+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
71901+#define XATTR_PAX_FLAGS_SUFFIX "flags"
71902+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
71903
71904 #endif /* _UAPI_LINUX_XATTR_H */
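
XATTR_PAX_PREFIX and XATTR_PAX_FLAGS_SUFFIX compose to the attribute name "user.pax.flags". A userspace sketch of writing it; the flag-letter convention for the value (e.g. lowercase enables, uppercase disables a feature) belongs to the PaX userland tools and is assumed here, not defined by this header:

#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
    if (argc < 3) {
        fprintf(stderr, "usage: %s <file> <flags>\n", argv[0]);
        return 1;
    }
    /* XATTR_PAX_PREFIX "flags" composes to "user.pax.flags" */
    if (setxattr(argv[1], "user.pax.flags",
                 argv[2], strlen(argv[2]), 0) != 0) {
        perror("setxattr");
        return 1;
    }
    return 0;
}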
71905diff --git a/include/video/udlfb.h b/include/video/udlfb.h
71906index f9466fa..f4e2b81 100644
71907--- a/include/video/udlfb.h
71908+++ b/include/video/udlfb.h
71909@@ -53,10 +53,10 @@ struct dlfb_data {
71910 u32 pseudo_palette[256];
71911 int blank_mode; /*one of FB_BLANK_ */
71912 /* blit-only rendering path metrics, exposed through sysfs */
71913- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
71914- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
71915- atomic_t bytes_sent; /* to usb, after compression including overhead */
71916- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
71917+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
71918+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
71919+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
71920+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
71921 };
71922
71923 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
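
These atomic_t to atomic_unchecked_t conversions (the same rewrite appears in kernel/audit.c and kernel/debug below) mark pure statistics counters: under PaX REFCOUNT, ordinary atomic_t arithmetic traps on overflow to catch reference-count bugs, and counters that may legitimately wrap opt out through the unchecked variant. A compilable sketch of the assumed shape, not the actual PaX headers:

#include <stdio.h>

/* Assumed shape: same layout as atomic_t, but its helpers omit the
 * overflow trap that REFCOUNT adds to the checked variants. */
typedef struct {
    int counter;
} atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);  /* may wrap */
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
    return __atomic_load_n(&v->counter, __ATOMIC_RELAXED);
}

int main(void)
{
    atomic_unchecked_t bytes_rendered = { 0 };
    atomic_inc_unchecked(&bytes_rendered);
    printf("%d\n", atomic_read_unchecked(&bytes_rendered));
    return 0;
}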
71924diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
71925index 0993a22..32ba2fe 100644
71926--- a/include/video/uvesafb.h
71927+++ b/include/video/uvesafb.h
71928@@ -177,6 +177,7 @@ struct uvesafb_par {
71929 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
71930 u8 pmi_setpal; /* PMI for palette changes */
71931 u16 *pmi_base; /* protected mode interface location */
71932+ u8 *pmi_code; /* protected mode code location */
71933 void *pmi_start;
71934 void *pmi_pal;
71935 u8 *vbe_state_orig; /*
71936diff --git a/init/Kconfig b/init/Kconfig
71937index be8b7f5..1eeca9b 100644
71938--- a/init/Kconfig
71939+++ b/init/Kconfig
71940@@ -990,6 +990,7 @@ endif # CGROUPS
71941
71942 config CHECKPOINT_RESTORE
71943 bool "Checkpoint/restore support" if EXPERT
71944+ depends on !GRKERNSEC
71945 default n
71946 help
71947 	  Enables additional kernel features for the sake of checkpoint/restore.
71948@@ -1468,7 +1469,7 @@ config SLUB_DEBUG
71949
71950 config COMPAT_BRK
71951 bool "Disable heap randomization"
71952- default y
71953+ default n
71954 help
71955 Randomizing heap placement makes heap exploits harder, but it
71956 also breaks ancient binaries (including anything libc5 based).
71957@@ -1711,7 +1712,7 @@ config INIT_ALL_POSSIBLE
71958 config STOP_MACHINE
71959 bool
71960 default y
71961- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
71962+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
71963 help
71964 Need stop_machine() primitive.
71965
71966diff --git a/init/Makefile b/init/Makefile
71967index 7bc47ee..6da2dc7 100644
71968--- a/init/Makefile
71969+++ b/init/Makefile
71970@@ -2,6 +2,9 @@
71971 # Makefile for the linux kernel.
71972 #
71973
71974+ccflags-y := $(GCC_PLUGINS_CFLAGS)
71975+asflags-y := $(GCC_PLUGINS_AFLAGS)
71976+
71977 obj-y := main.o version.o mounts.o
71978 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
71979 obj-y += noinitramfs.o
71980diff --git a/init/do_mounts.c b/init/do_mounts.c
71981index 1d1b634..a1c810f 100644
71982--- a/init/do_mounts.c
71983+++ b/init/do_mounts.c
71984@@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
71985 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
71986 {
71987 struct super_block *s;
71988- int err = sys_mount(name, "/root", fs, flags, data);
71989+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
71990 if (err)
71991 return err;
71992
71993- sys_chdir("/root");
71994+ sys_chdir((const char __force_user *)"/root");
71995 s = current->fs->pwd.dentry->d_sb;
71996 ROOT_DEV = s->s_dev;
71997 printk(KERN_INFO
71998@@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
71999 va_start(args, fmt);
72000 vsprintf(buf, fmt, args);
72001 va_end(args);
72002- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
72003+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
72004 if (fd >= 0) {
72005 sys_ioctl(fd, FDEJECT, 0);
72006 sys_close(fd);
72007 }
72008 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
72009- fd = sys_open("/dev/console", O_RDWR, 0);
72010+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
72011 if (fd >= 0) {
72012 sys_ioctl(fd, TCGETS, (long)&termios);
72013 termios.c_lflag &= ~ICANON;
72014 sys_ioctl(fd, TCSETSF, (long)&termios);
72015- sys_read(fd, &c, 1);
72016+ sys_read(fd, (char __user *)&c, 1);
72017 termios.c_lflag |= ICANON;
72018 sys_ioctl(fd, TCSETSF, (long)&termios);
72019 sys_close(fd);
72020@@ -585,6 +585,6 @@ void __init prepare_namespace(void)
72021 mount_root();
72022 out:
72023 devtmpfs_mount("dev");
72024- sys_mount(".", "/", NULL, MS_MOVE, NULL);
72025- sys_chroot(".");
72026+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
72027+ sys_chroot((const char __force_user *)".");
72028 }
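
The casts in this file change nothing at runtime; they exist for sparse, whose __user annotation places pointers in a separate address space that kernel code must not dereference directly. These early-boot sites pass kernel strings to syscall bodies declared with __user parameters, so each is whitelisted with a __force_user cast. A self-contained sketch of the machinery; the macro expansion of __force_user is an assumption, not taken from this hunk:

#include <stdio.h>

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif
#define __force_user __force __user

static long sys_chdir_stub(const char __user *path)
{
    (void)path;
    return 0;
}

int main(void)
{
    /* kernel string passed to a __user-annotated parameter: only clean
     * under sparse with the explicit cast, as in do_mount_root() above */
    return (int)sys_chdir_stub((const char __force_user *)"/root");
}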
72029diff --git a/init/do_mounts.h b/init/do_mounts.h
72030index f5b978a..69dbfe8 100644
72031--- a/init/do_mounts.h
72032+++ b/init/do_mounts.h
72033@@ -15,15 +15,15 @@ extern int root_mountflags;
72034
72035 static inline int create_dev(char *name, dev_t dev)
72036 {
72037- sys_unlink(name);
72038- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
72039+ sys_unlink((char __force_user *)name);
72040+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
72041 }
72042
72043 #if BITS_PER_LONG == 32
72044 static inline u32 bstat(char *name)
72045 {
72046 struct stat64 stat;
72047- if (sys_stat64(name, &stat) != 0)
72048+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
72049 return 0;
72050 if (!S_ISBLK(stat.st_mode))
72051 return 0;
72052@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
72053 static inline u32 bstat(char *name)
72054 {
72055 struct stat stat;
72056- if (sys_newstat(name, &stat) != 0)
72057+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
72058 return 0;
72059 if (!S_ISBLK(stat.st_mode))
72060 return 0;
72061diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
72062index f9acf71..1e19144 100644
72063--- a/init/do_mounts_initrd.c
72064+++ b/init/do_mounts_initrd.c
72065@@ -58,8 +58,8 @@ static void __init handle_initrd(void)
72066 create_dev("/dev/root.old", Root_RAM0);
72067 /* mount initrd on rootfs' /root */
72068 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
72069- sys_mkdir("/old", 0700);
72070- sys_chdir("/old");
72071+ sys_mkdir((const char __force_user *)"/old", 0700);
72072+ sys_chdir((const char __force_user *)"/old");
72073
72074 /*
72075 * In case that a resume from disk is carried out by linuxrc or one of
72076@@ -73,31 +73,31 @@ static void __init handle_initrd(void)
72077 current->flags &= ~PF_FREEZER_SKIP;
72078
72079 /* move initrd to rootfs' /old */
72080- sys_mount("..", ".", NULL, MS_MOVE, NULL);
72081+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
72082 /* switch root and cwd back to / of rootfs */
72083- sys_chroot("..");
72084+ sys_chroot((const char __force_user *)"..");
72085
72086 if (new_decode_dev(real_root_dev) == Root_RAM0) {
72087- sys_chdir("/old");
72088+ sys_chdir((const char __force_user *)"/old");
72089 return;
72090 }
72091
72092- sys_chdir("/");
72093+ sys_chdir((const char __force_user *)"/");
72094 ROOT_DEV = new_decode_dev(real_root_dev);
72095 mount_root();
72096
72097 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
72098- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
72099+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
72100 if (!error)
72101 printk("okay\n");
72102 else {
72103- int fd = sys_open("/dev/root.old", O_RDWR, 0);
72104+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
72105 if (error == -ENOENT)
72106 printk("/initrd does not exist. Ignored.\n");
72107 else
72108 printk("failed\n");
72109 printk(KERN_NOTICE "Unmounting old root\n");
72110- sys_umount("/old", MNT_DETACH);
72111+ sys_umount((char __force_user *)"/old", MNT_DETACH);
72112 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
72113 if (fd < 0) {
72114 error = fd;
72115@@ -120,11 +120,11 @@ int __init initrd_load(void)
72116 * mounted in the normal path.
72117 */
72118 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
72119- sys_unlink("/initrd.image");
72120+ sys_unlink((const char __force_user *)"/initrd.image");
72121 handle_initrd();
72122 return 1;
72123 }
72124 }
72125- sys_unlink("/initrd.image");
72126+ sys_unlink((const char __force_user *)"/initrd.image");
72127 return 0;
72128 }
72129diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
72130index 8cb6db5..d729f50 100644
72131--- a/init/do_mounts_md.c
72132+++ b/init/do_mounts_md.c
72133@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
72134 partitioned ? "_d" : "", minor,
72135 md_setup_args[ent].device_names);
72136
72137- fd = sys_open(name, 0, 0);
72138+ fd = sys_open((char __force_user *)name, 0, 0);
72139 if (fd < 0) {
72140 printk(KERN_ERR "md: open failed - cannot start "
72141 "array %s\n", name);
72142@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
72143 * array without it
72144 */
72145 sys_close(fd);
72146- fd = sys_open(name, 0, 0);
72147+ fd = sys_open((char __force_user *)name, 0, 0);
72148 sys_ioctl(fd, BLKRRPART, 0);
72149 }
72150 sys_close(fd);
72151@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
72152
72153 wait_for_device_probe();
72154
72155- fd = sys_open("/dev/md0", 0, 0);
72156+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
72157 if (fd >= 0) {
72158 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
72159 sys_close(fd);
72160diff --git a/init/init_task.c b/init/init_task.c
72161index 8b2f399..f0797c9 100644
72162--- a/init/init_task.c
72163+++ b/init/init_task.c
72164@@ -20,5 +20,9 @@ EXPORT_SYMBOL(init_task);
72165 * Initial thread structure. Alignment of this is handled by a special
72166 * linker map entry.
72167 */
72168+#ifdef CONFIG_X86
72169+union thread_union init_thread_union __init_task_data;
72170+#else
72171 union thread_union init_thread_union __init_task_data =
72172 { INIT_THREAD_INFO(init_task) };
72173+#endif
72174diff --git a/init/initramfs.c b/init/initramfs.c
72175index 84c6bf1..8899338 100644
72176--- a/init/initramfs.c
72177+++ b/init/initramfs.c
72178@@ -84,7 +84,7 @@ static void __init free_hash(void)
72179 }
72180 }
72181
72182-static long __init do_utime(char *filename, time_t mtime)
72183+static long __init do_utime(char __force_user *filename, time_t mtime)
72184 {
72185 struct timespec t[2];
72186
72187@@ -119,7 +119,7 @@ static void __init dir_utime(void)
72188 struct dir_entry *de, *tmp;
72189 list_for_each_entry_safe(de, tmp, &dir_list, list) {
72190 list_del(&de->list);
72191- do_utime(de->name, de->mtime);
72192+ do_utime((char __force_user *)de->name, de->mtime);
72193 kfree(de->name);
72194 kfree(de);
72195 }
72196@@ -281,7 +281,7 @@ static int __init maybe_link(void)
72197 if (nlink >= 2) {
72198 char *old = find_link(major, minor, ino, mode, collected);
72199 if (old)
72200- return (sys_link(old, collected) < 0) ? -1 : 1;
72201+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
72202 }
72203 return 0;
72204 }
72205@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
72206 {
72207 struct stat st;
72208
72209- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
72210+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
72211 if (S_ISDIR(st.st_mode))
72212- sys_rmdir(path);
72213+ sys_rmdir((char __force_user *)path);
72214 else
72215- sys_unlink(path);
72216+ sys_unlink((char __force_user *)path);
72217 }
72218 }
72219
72220@@ -315,7 +315,7 @@ static int __init do_name(void)
72221 int openflags = O_WRONLY|O_CREAT;
72222 if (ml != 1)
72223 openflags |= O_TRUNC;
72224- wfd = sys_open(collected, openflags, mode);
72225+ wfd = sys_open((char __force_user *)collected, openflags, mode);
72226
72227 if (wfd >= 0) {
72228 sys_fchown(wfd, uid, gid);
72229@@ -327,17 +327,17 @@ static int __init do_name(void)
72230 }
72231 }
72232 } else if (S_ISDIR(mode)) {
72233- sys_mkdir(collected, mode);
72234- sys_chown(collected, uid, gid);
72235- sys_chmod(collected, mode);
72236+ sys_mkdir((char __force_user *)collected, mode);
72237+ sys_chown((char __force_user *)collected, uid, gid);
72238+ sys_chmod((char __force_user *)collected, mode);
72239 dir_add(collected, mtime);
72240 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
72241 S_ISFIFO(mode) || S_ISSOCK(mode)) {
72242 if (maybe_link() == 0) {
72243- sys_mknod(collected, mode, rdev);
72244- sys_chown(collected, uid, gid);
72245- sys_chmod(collected, mode);
72246- do_utime(collected, mtime);
72247+ sys_mknod((char __force_user *)collected, mode, rdev);
72248+ sys_chown((char __force_user *)collected, uid, gid);
72249+ sys_chmod((char __force_user *)collected, mode);
72250+ do_utime((char __force_user *)collected, mtime);
72251 }
72252 }
72253 return 0;
72254@@ -346,15 +346,15 @@ static int __init do_name(void)
72255 static int __init do_copy(void)
72256 {
72257 if (count >= body_len) {
72258- sys_write(wfd, victim, body_len);
72259+ sys_write(wfd, (char __force_user *)victim, body_len);
72260 sys_close(wfd);
72261- do_utime(vcollected, mtime);
72262+ do_utime((char __force_user *)vcollected, mtime);
72263 kfree(vcollected);
72264 eat(body_len);
72265 state = SkipIt;
72266 return 0;
72267 } else {
72268- sys_write(wfd, victim, count);
72269+ sys_write(wfd, (char __force_user *)victim, count);
72270 body_len -= count;
72271 eat(count);
72272 return 1;
72273@@ -365,9 +365,9 @@ static int __init do_symlink(void)
72274 {
72275 collected[N_ALIGN(name_len) + body_len] = '\0';
72276 clean_path(collected, 0);
72277- sys_symlink(collected + N_ALIGN(name_len), collected);
72278- sys_lchown(collected, uid, gid);
72279- do_utime(collected, mtime);
72280+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
72281+ sys_lchown((char __force_user *)collected, uid, gid);
72282+ do_utime((char __force_user *)collected, mtime);
72283 state = SkipIt;
72284 next_state = Reset;
72285 return 0;
72286diff --git a/init/main.c b/init/main.c
72287index cee4b5c..9c267d9 100644
72288--- a/init/main.c
72289+++ b/init/main.c
72290@@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void) { }
72291 extern void tc_init(void);
72292 #endif
72293
72294+extern void grsecurity_init(void);
72295+
72296 /*
72297 * Debug helper: via this flag we know that we are in 'early bootup code'
72298 * where only the boot processor is running with IRQ disabled. This means
72299@@ -149,6 +151,61 @@ static int __init set_reset_devices(char *str)
72300
72301 __setup("reset_devices", set_reset_devices);
72302
72303+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
72304+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
72305+static int __init setup_grsec_proc_gid(char *str)
72306+{
72307+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
72308+ return 1;
72309+}
72310+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
72311+#endif
72312+
72313+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
72314+extern char pax_enter_kernel_user[];
72315+extern char pax_exit_kernel_user[];
72316+extern pgdval_t clone_pgd_mask;
72317+#endif
72318+
72319+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
72320+static int __init setup_pax_nouderef(char *str)
72321+{
72322+#ifdef CONFIG_X86_32
72323+ unsigned int cpu;
72324+ struct desc_struct *gdt;
72325+
72326+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
72327+ gdt = get_cpu_gdt_table(cpu);
72328+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
72329+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
72330+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
72331+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
72332+ }
72333+ loadsegment(ds, __KERNEL_DS);
72334+ loadsegment(es, __KERNEL_DS);
72335+ loadsegment(ss, __KERNEL_DS);
72336+#else
72337+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
72338+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
72339+ clone_pgd_mask = ~(pgdval_t)0UL;
72340+#endif
72341+
72342+ return 0;
72343+}
72344+early_param("pax_nouderef", setup_pax_nouderef);
72345+#endif
72346+
72347+#ifdef CONFIG_PAX_SOFTMODE
72348+int pax_softmode;
72349+
72350+static int __init setup_pax_softmode(char *str)
72351+{
72352+ get_option(&str, &pax_softmode);
72353+ return 1;
72354+}
72355+__setup("pax_softmode=", setup_pax_softmode);
72356+#endif
72357+
72358 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
72359 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
72360 static const char *panic_later, *panic_param;
72361@@ -681,6 +738,7 @@ int __init_or_module do_one_initcall(initcall_t fn)
72362 {
72363 int count = preempt_count();
72364 int ret;
72365+ const char *msg1 = "", *msg2 = "";
72366
72367 if (initcall_debug)
72368 ret = do_one_initcall_debug(fn);
72369@@ -693,15 +751,15 @@ int __init_or_module do_one_initcall(initcall_t fn)
72370 sprintf(msgbuf, "error code %d ", ret);
72371
72372 if (preempt_count() != count) {
72373- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
72374+ msg1 = " preemption imbalance";
72375 preempt_count() = count;
72376 }
72377 if (irqs_disabled()) {
72378- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
72379+ msg2 = " disabled interrupts";
72380 local_irq_enable();
72381 }
72382- if (msgbuf[0]) {
72383- printk("initcall %pF returned with %s\n", fn, msgbuf);
72384+ if (msgbuf[0] || *msg1 || *msg2) {
72385+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
72386 }
72387
72388 return ret;
72389@@ -743,6 +801,10 @@ static char *initcall_level_names[] __initdata = {
72390 "late",
72391 };
72392
72393+#ifdef CONFIG_PAX_LATENT_ENTROPY
72394+u64 latent_entropy;
72395+#endif
72396+
72397 static void __init do_initcall_level(int level)
72398 {
72399 extern const struct kernel_param __start___param[], __stop___param[];
72400@@ -755,8 +817,14 @@ static void __init do_initcall_level(int level)
72401 level, level,
72402 &repair_env_string);
72403
72404- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
72405+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
72406 do_one_initcall(*fn);
72407+
72408+#ifdef CONFIG_PAX_LATENT_ENTROPY
72409+ add_device_randomness(&latent_entropy, sizeof(latent_entropy));
72410+#endif
72411+
72412+ }
72413 }
72414
72415 static void __init do_initcalls(void)
72416@@ -790,8 +858,14 @@ static void __init do_pre_smp_initcalls(void)
72417 {
72418 initcall_t *fn;
72419
72420- for (fn = __initcall_start; fn < __initcall0_start; fn++)
72421+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
72422 do_one_initcall(*fn);
72423+
72424+#ifdef CONFIG_PAX_LATENT_ENTROPY
72425+ add_device_randomness(&latent_entropy, sizeof(latent_entropy));
72426+#endif
72427+
72428+ }
72429 }
72430
72431 static int run_init_process(const char *init_filename)
72432@@ -877,7 +951,7 @@ static noinline void __init kernel_init_freeable(void)
72433 do_basic_setup();
72434
72435 /* Open the /dev/console on the rootfs, this should never fail */
72436- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
72437+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
72438 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
72439
72440 (void) sys_dup(0);
72441@@ -890,11 +964,13 @@ static noinline void __init kernel_init_freeable(void)
72442 if (!ramdisk_execute_command)
72443 ramdisk_execute_command = "/init";
72444
72445- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
72446+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
72447 ramdisk_execute_command = NULL;
72448 prepare_namespace();
72449 }
72450
72451+ grsecurity_init();
72452+
72453 /*
72454 * Ok, we have completed the initial bootup, and
72455 * we're essentially up and running. Get rid of the
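
Besides wiring in grsecurity_init() and mixing gcc-plugin-collected latent entropy after every initcall, this hunk registers three boot parameters through the standard __setup()/early_param() hooks. The registration pattern in isolation, as a sketch with a hypothetical parameter name:

#include <linux/init.h>
#include <linux/kernel.h>

/* modeled on setup_pax_softmode() above: booting with example_mode=1
 * flips the flag before initcalls run */
static int example_mode;

static int __init setup_example_mode(char *str)
{
	get_option(&str, &example_mode);
	return 1;	/* non-zero: the parameter was consumed */
}
__setup("example_mode=", setup_example_mode);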
72456diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
72457index 130dfec..cc88451 100644
72458--- a/ipc/ipc_sysctl.c
72459+++ b/ipc/ipc_sysctl.c
72460@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
72461 static int proc_ipc_dointvec(ctl_table *table, int write,
72462 void __user *buffer, size_t *lenp, loff_t *ppos)
72463 {
72464- struct ctl_table ipc_table;
72465+ ctl_table_no_const ipc_table;
72466
72467 memcpy(&ipc_table, table, sizeof(ipc_table));
72468 ipc_table.data = get_ipc(table);
72469@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
72470 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
72471 void __user *buffer, size_t *lenp, loff_t *ppos)
72472 {
72473- struct ctl_table ipc_table;
72474+ ctl_table_no_const ipc_table;
72475
72476 memcpy(&ipc_table, table, sizeof(ipc_table));
72477 ipc_table.data = get_ipc(table);
72478@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
72479 static int proc_ipc_callback_dointvec(ctl_table *table, int write,
72480 void __user *buffer, size_t *lenp, loff_t *ppos)
72481 {
72482- struct ctl_table ipc_table;
72483+ ctl_table_no_const ipc_table;
72484 size_t lenp_bef = *lenp;
72485 int rc;
72486
72487@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
72488 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
72489 void __user *buffer, size_t *lenp, loff_t *ppos)
72490 {
72491- struct ctl_table ipc_table;
72492+ ctl_table_no_const ipc_table;
72493 memcpy(&ipc_table, table, sizeof(ipc_table));
72494 ipc_table.data = get_ipc(table);
72495
72496@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
72497 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
72498 void __user *buffer, size_t *lenp, loff_t *ppos)
72499 {
72500- struct ctl_table ipc_table;
72501+ ctl_table_no_const ipc_table;
72502 size_t lenp_bef = *lenp;
72503 int oldval;
72504 int rc;
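
Each handler here copies its ctl_table to the stack and then writes the copy's .data pointer; under the grsecurity constify plugin, struct ctl_table objects become read-only, so the writable copy needs the non-constified twin type. A sketch of the idiom; ctl_table_no_const's definition (a plain, non-constified typedef of struct ctl_table) and the helper name are assumptions:

static int proc_example_dointvec(ctl_table *table, int write,
	void __user *buffer, size_t *lenp, loff_t *ppos)
{
	ctl_table_no_const tmp = *table;	/* writable stack copy */

	tmp.data = get_example_data(table);	/* hypothetical helper */
	return proc_dointvec(&tmp, write, buffer, lenp, ppos);
}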
72505diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
72506index 383d638..943fdbb 100644
72507--- a/ipc/mq_sysctl.c
72508+++ b/ipc/mq_sysctl.c
72509@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
72510 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
72511 void __user *buffer, size_t *lenp, loff_t *ppos)
72512 {
72513- struct ctl_table mq_table;
72514+ ctl_table_no_const mq_table;
72515 memcpy(&mq_table, table, sizeof(mq_table));
72516 mq_table.data = get_mq(table);
72517
72518diff --git a/ipc/mqueue.c b/ipc/mqueue.c
72519index 71a3ca1..cc330ee 100644
72520--- a/ipc/mqueue.c
72521+++ b/ipc/mqueue.c
72522@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
72523 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
72524 info->attr.mq_msgsize);
72525
72526+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
72527 spin_lock(&mq_lock);
72528 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
72529 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
72530diff --git a/ipc/msg.c b/ipc/msg.c
72531index 950572f..362ea07 100644
72532--- a/ipc/msg.c
72533+++ b/ipc/msg.c
72534@@ -309,18 +309,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
72535 return security_msg_queue_associate(msq, msgflg);
72536 }
72537
72538+static struct ipc_ops msg_ops = {
72539+ .getnew = newque,
72540+ .associate = msg_security,
72541+ .more_checks = NULL
72542+};
72543+
72544 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
72545 {
72546 struct ipc_namespace *ns;
72547- struct ipc_ops msg_ops;
72548 struct ipc_params msg_params;
72549
72550 ns = current->nsproxy->ipc_ns;
72551
72552- msg_ops.getnew = newque;
72553- msg_ops.associate = msg_security;
72554- msg_ops.more_checks = NULL;
72555-
72556 msg_params.key = key;
72557 msg_params.flg = msgflg;
72558
72559@@ -820,15 +821,17 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
72560 struct msg_msg *copy = NULL;
72561 unsigned long copy_number = 0;
72562
72563+ ns = current->nsproxy->ipc_ns;
72564+
72565 if (msqid < 0 || (long) bufsz < 0)
72566 return -EINVAL;
72567 if (msgflg & MSG_COPY) {
72568- copy = prepare_copy(buf, bufsz, msgflg, &msgtyp, &copy_number);
72569+ copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax),
72570+ msgflg, &msgtyp, &copy_number);
72571 if (IS_ERR(copy))
72572 return PTR_ERR(copy);
72573 }
72574 mode = convert_mode(&msgtyp, msgflg);
72575- ns = current->nsproxy->ipc_ns;
72576
72577 msq = msg_lock_check(ns, msqid);
72578 if (IS_ERR(msq)) {
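
Two separate changes here: the per-call ipc_ops initialisation moves to a file-scope static (semget() and shmget() below get the same rewrite), and do_msgrcv() now resolves the namespace before preparing a MSG_COPY buffer so the allocation is clamped to ns->msg_ctlmax instead of trusting the caller's bufsz. The clamp in miniature:

#include <stddef.h>
#include <stdio.h>

/* a caller-supplied size is bounded by the namespace limit before it
 * reaches the allocator */
static size_t bounded_copy_size(size_t bufsz, size_t msg_ctlmax)
{
    return bufsz < msg_ctlmax ? bufsz : msg_ctlmax;  /* min_t(size_t, ...) */
}

int main(void)
{
    printf("%zu\n", bounded_copy_size(1 << 20, 8192));  /* prints 8192 */
    return 0;
}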
72579diff --git a/ipc/msgutil.c b/ipc/msgutil.c
72580index ebfcbfa..5df8e4b 100644
72581--- a/ipc/msgutil.c
72582+++ b/ipc/msgutil.c
72583@@ -117,9 +117,6 @@ struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst)
72584 if (alen > DATALEN_MSG)
72585 alen = DATALEN_MSG;
72586
72587- dst->next = NULL;
72588- dst->security = NULL;
72589-
72590 memcpy(dst + 1, src + 1, alen);
72591
72592 len -= alen;
72593diff --git a/ipc/sem.c b/ipc/sem.c
72594index 58d31f1..cce7a55 100644
72595--- a/ipc/sem.c
72596+++ b/ipc/sem.c
72597@@ -364,10 +364,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
72598 return 0;
72599 }
72600
72601+static struct ipc_ops sem_ops = {
72602+ .getnew = newary,
72603+ .associate = sem_security,
72604+ .more_checks = sem_more_checks
72605+};
72606+
72607 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
72608 {
72609 struct ipc_namespace *ns;
72610- struct ipc_ops sem_ops;
72611 struct ipc_params sem_params;
72612
72613 ns = current->nsproxy->ipc_ns;
72614@@ -375,10 +380,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
72615 if (nsems < 0 || nsems > ns->sc_semmsl)
72616 return -EINVAL;
72617
72618- sem_ops.getnew = newary;
72619- sem_ops.associate = sem_security;
72620- sem_ops.more_checks = sem_more_checks;
72621-
72622 sem_params.key = key;
72623 sem_params.flg = semflg;
72624 sem_params.u.nsems = nsems;
72625diff --git a/ipc/shm.c b/ipc/shm.c
72626index 4fa6d8f..55cff14 100644
72627--- a/ipc/shm.c
72628+++ b/ipc/shm.c
72629@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
72630 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
72631 #endif
72632
72633+#ifdef CONFIG_GRKERNSEC
72634+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
72635+ const time_t shm_createtime, const kuid_t cuid,
72636+ const int shmid);
72637+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
72638+ const time_t shm_createtime);
72639+#endif
72640+
72641 void shm_init_ns(struct ipc_namespace *ns)
72642 {
72643 ns->shm_ctlmax = SHMMAX;
72644@@ -521,6 +529,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
72645 shp->shm_lprid = 0;
72646 shp->shm_atim = shp->shm_dtim = 0;
72647 shp->shm_ctim = get_seconds();
72648+#ifdef CONFIG_GRKERNSEC
72649+ {
72650+ struct timespec timeval;
72651+ do_posix_clock_monotonic_gettime(&timeval);
72652+
72653+ shp->shm_createtime = timeval.tv_sec;
72654+ }
72655+#endif
72656 shp->shm_segsz = size;
72657 shp->shm_nattch = 0;
72658 shp->shm_file = file;
72659@@ -572,18 +588,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
72660 return 0;
72661 }
72662
72663+static struct ipc_ops shm_ops = {
72664+ .getnew = newseg,
72665+ .associate = shm_security,
72666+ .more_checks = shm_more_checks
72667+};
72668+
72669 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
72670 {
72671 struct ipc_namespace *ns;
72672- struct ipc_ops shm_ops;
72673 struct ipc_params shm_params;
72674
72675 ns = current->nsproxy->ipc_ns;
72676
72677- shm_ops.getnew = newseg;
72678- shm_ops.associate = shm_security;
72679- shm_ops.more_checks = shm_more_checks;
72680-
72681 shm_params.key = key;
72682 shm_params.flg = shmflg;
72683 shm_params.u.size = size;
72684@@ -1004,6 +1021,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
72685 f_mode = FMODE_READ | FMODE_WRITE;
72686 }
72687 if (shmflg & SHM_EXEC) {
72688+
72689+#ifdef CONFIG_PAX_MPROTECT
72690+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
72691+ goto out;
72692+#endif
72693+
72694 prot |= PROT_EXEC;
72695 acc_mode |= S_IXUGO;
72696 }
72697@@ -1027,9 +1050,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
72698 if (err)
72699 goto out_unlock;
72700
72701+#ifdef CONFIG_GRKERNSEC
72702+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
72703+ shp->shm_perm.cuid, shmid) ||
72704+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
72705+ err = -EACCES;
72706+ goto out_unlock;
72707+ }
72708+#endif
72709+
72710 path = shp->shm_file->f_path;
72711 path_get(&path);
72712 shp->shm_nattch++;
72713+#ifdef CONFIG_GRKERNSEC
72714+ shp->shm_lapid = current->pid;
72715+#endif
72716 size = i_size_read(path.dentry->d_inode);
72717 shm_unlock(shp);
72718
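
On the attach side, the new MPROTECT hook rejects SHM_EXEC outright for tasks carrying MF_PAX_MPROTECT, before the grsecurity shmat checks run. What that looks like from userspace (the exact errno is left unspecified here):

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

#ifndef SHM_EXEC
#define SHM_EXEC 0100000  /* from <linux/shm.h> */
#endif

int main(void)
{
    int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
    if (id < 0) { perror("shmget"); return 1; }

    void *p = shmat(id, NULL, SHM_EXEC);  /* rejected under PaX MPROTECT */
    if (p == (void *)-1)
        perror("shmat(SHM_EXEC)");

    shmctl(id, IPC_RMID, NULL);
    return 0;
}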
72719diff --git a/kernel/acct.c b/kernel/acct.c
72720index 051e071..15e0920 100644
72721--- a/kernel/acct.c
72722+++ b/kernel/acct.c
72723@@ -550,7 +550,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
72724 */
72725 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
72726 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
72727- file->f_op->write(file, (char *)&ac,
72728+ file->f_op->write(file, (char __force_user *)&ac,
72729 sizeof(acct_t), &file->f_pos);
72730 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
72731 set_fs(fs);
72732diff --git a/kernel/audit.c b/kernel/audit.c
72733index d596e53..dbef3c3 100644
72734--- a/kernel/audit.c
72735+++ b/kernel/audit.c
72736@@ -116,7 +116,7 @@ u32 audit_sig_sid = 0;
72737 3) suppressed due to audit_rate_limit
72738 4) suppressed due to audit_backlog_limit
72739 */
72740-static atomic_t audit_lost = ATOMIC_INIT(0);
72741+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
72742
72743 /* The netlink socket. */
72744 static struct sock *audit_sock;
72745@@ -238,7 +238,7 @@ void audit_log_lost(const char *message)
72746 unsigned long now;
72747 int print;
72748
72749- atomic_inc(&audit_lost);
72750+ atomic_inc_unchecked(&audit_lost);
72751
72752 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
72753
72754@@ -257,7 +257,7 @@ void audit_log_lost(const char *message)
72755 printk(KERN_WARNING
72756 "audit: audit_lost=%d audit_rate_limit=%d "
72757 "audit_backlog_limit=%d\n",
72758- atomic_read(&audit_lost),
72759+ atomic_read_unchecked(&audit_lost),
72760 audit_rate_limit,
72761 audit_backlog_limit);
72762 audit_panic(message);
72763@@ -681,7 +681,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
72764 status_set.pid = audit_pid;
72765 status_set.rate_limit = audit_rate_limit;
72766 status_set.backlog_limit = audit_backlog_limit;
72767- status_set.lost = atomic_read(&audit_lost);
72768+ status_set.lost = atomic_read_unchecked(&audit_lost);
72769 status_set.backlog = skb_queue_len(&audit_skb_queue);
72770 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
72771 &status_set, sizeof(status_set));
72772diff --git a/kernel/auditsc.c b/kernel/auditsc.c
72773index a371f85..da826c1 100644
72774--- a/kernel/auditsc.c
72775+++ b/kernel/auditsc.c
72776@@ -2292,7 +2292,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
72777 }
72778
72779 /* global counter which is incremented every time something logs in */
72780-static atomic_t session_id = ATOMIC_INIT(0);
72781+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
72782
72783 /**
72784 * audit_set_loginuid - set current task's audit_context loginuid
72785@@ -2316,7 +2316,7 @@ int audit_set_loginuid(kuid_t loginuid)
72786 return -EPERM;
72787 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
72788
72789- sessionid = atomic_inc_return(&session_id);
72790+ sessionid = atomic_inc_return_unchecked(&session_id);
72791 if (context && context->in_syscall) {
72792 struct audit_buffer *ab;
72793
72794diff --git a/kernel/capability.c b/kernel/capability.c
72795index 493d972..f87dfbd 100644
72796--- a/kernel/capability.c
72797+++ b/kernel/capability.c
72798@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
72799 * before modification is attempted and the application
72800 * fails.
72801 */
72802+ if (tocopy > ARRAY_SIZE(kdata))
72803+ return -EFAULT;
72804+
72805 if (copy_to_user(dataptr, kdata, tocopy
72806 * sizeof(struct __user_cap_data_struct))) {
72807 return -EFAULT;
72808@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
72809 int ret;
72810
72811 rcu_read_lock();
72812- ret = security_capable(__task_cred(t), ns, cap);
72813+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
72814+ gr_task_is_capable(t, __task_cred(t), cap);
72815 rcu_read_unlock();
72816
72817- return (ret == 0);
72818+ return ret;
72819 }
72820
72821 /**
72822@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
72823 int ret;
72824
72825 rcu_read_lock();
72826- ret = security_capable_noaudit(__task_cred(t), ns, cap);
72827+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
72828 rcu_read_unlock();
72829
72830- return (ret == 0);
72831+ return ret;
72832 }
72833
72834 /**
72835@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
72836 BUG();
72837 }
72838
72839- if (security_capable(current_cred(), ns, cap) == 0) {
72840+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
72841 current->flags |= PF_SUPERPRIV;
72842 return true;
72843 }
72844@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
72845 }
72846 EXPORT_SYMBOL(ns_capable);
72847
72848+bool ns_capable_nolog(struct user_namespace *ns, int cap)
72849+{
72850+ if (unlikely(!cap_valid(cap))) {
72851+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
72852+ BUG();
72853+ }
72854+
72855+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
72856+ current->flags |= PF_SUPERPRIV;
72857+ return true;
72858+ }
72859+ return false;
72860+}
72861+EXPORT_SYMBOL(ns_capable_nolog);
72862+
72863 /**
72864 * capable - Determine if the current task has a superior capability in effect
72865 * @cap: The capability to be tested for
72866@@ -408,6 +427,12 @@ bool capable(int cap)
72867 }
72868 EXPORT_SYMBOL(capable);
72869
72870+bool capable_nolog(int cap)
72871+{
72872+ return ns_capable_nolog(&init_user_ns, cap);
72873+}
72874+EXPORT_SYMBOL(capable_nolog);
72875+
72876 /**
72877 * nsown_capable - Check superior capability to one's own user_ns
72878 * @cap: The capability in question
72879@@ -440,3 +465,10 @@ bool inode_capable(const struct inode *inode, int cap)
72880
72881 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
72882 }
72883+
72884+bool inode_capable_nolog(const struct inode *inode, int cap)
72885+{
72886+ struct user_namespace *ns = current_user_ns();
72887+
72888+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
72889+}
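
The capget() change is a plain bounds check: tocopy is derived from the user-supplied header version while kdata is a fixed-size on-stack array, so an oversized request could previously read past it; it now fails with -EFAULT. The guard in miniature:

#include <stdio.h>
#include <string.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct cap_data { unsigned int effective, permitted, inheritable; };

/* copy out of a fixed-size kernel-side array; reject counts that would
 * read past its end (an info leak in the unguarded version) */
static int copy_caps_out(struct cap_data *dst,
                         const struct cap_data *src, size_t src_n,
                         size_t tocopy)
{
    if (tocopy > src_n)
        return -1;
    memcpy(dst, src, tocopy * sizeof(*src));
    return 0;
}

int main(void)
{
    struct cap_data kdata[2] = { { 1, 2, 3 }, { 4, 5, 6 } }, out[3];
    printf("%d\n", copy_caps_out(out, kdata, ARRAY_SIZE(kdata), 2));  /* 0  */
    printf("%d\n", copy_caps_out(out, kdata, ARRAY_SIZE(kdata), 3));  /* -1 */
    return 0;
}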
72890diff --git a/kernel/cgroup.c b/kernel/cgroup.c
72891index 1e23664..570a83d 100644
72892--- a/kernel/cgroup.c
72893+++ b/kernel/cgroup.c
72894@@ -5543,7 +5543,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
72895 struct css_set *cg = link->cg;
72896 struct task_struct *task;
72897 int count = 0;
72898- seq_printf(seq, "css_set %p\n", cg);
72899+ seq_printf(seq, "css_set %pK\n", cg);
72900 list_for_each_entry(task, &cg->tasks, cg_list) {
72901 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
72902 seq_puts(seq, " ...\n");
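
%pK defers to the kptr_restrict sysctl and, for seq_file output, the opener's credentials, so unprivileged readers of this debug file see a censored value (zeros on 3.8-era kernels) where %p would print the raw heap address:

seq_printf(seq, "css_set %p\n",  cg);   /* before: always the raw pointer  */
seq_printf(seq, "css_set %pK\n", cg);   /* after: subject to kptr_restrict */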
72903diff --git a/kernel/compat.c b/kernel/compat.c
72904index 36700e9..73d770c 100644
72905--- a/kernel/compat.c
72906+++ b/kernel/compat.c
72907@@ -13,6 +13,7 @@
72908
72909 #include <linux/linkage.h>
72910 #include <linux/compat.h>
72911+#include <linux/module.h>
72912 #include <linux/errno.h>
72913 #include <linux/time.h>
72914 #include <linux/signal.h>
72915@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
72916 mm_segment_t oldfs;
72917 long ret;
72918
72919- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
72920+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
72921 oldfs = get_fs();
72922 set_fs(KERNEL_DS);
72923 ret = hrtimer_nanosleep_restart(restart);
72924@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
72925 oldfs = get_fs();
72926 set_fs(KERNEL_DS);
72927 ret = hrtimer_nanosleep(&tu,
72928- rmtp ? (struct timespec __user *)&rmt : NULL,
72929+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
72930 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
72931 set_fs(oldfs);
72932
72933@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
72934 mm_segment_t old_fs = get_fs();
72935
72936 set_fs(KERNEL_DS);
72937- ret = sys_sigpending((old_sigset_t __user *) &s);
72938+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
72939 set_fs(old_fs);
72940 if (ret == 0)
72941 ret = put_user(s, set);
72942@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
72943 mm_segment_t old_fs = get_fs();
72944
72945 set_fs(KERNEL_DS);
72946- ret = sys_old_getrlimit(resource, &r);
72947+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
72948 set_fs(old_fs);
72949
72950 if (!ret) {
72951@@ -523,7 +524,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
72952 mm_segment_t old_fs = get_fs();
72953
72954 set_fs(KERNEL_DS);
72955- ret = sys_getrusage(who, (struct rusage __user *) &r);
72956+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
72957 set_fs(old_fs);
72958
72959 if (ret)
72960@@ -552,8 +553,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
72961 set_fs (KERNEL_DS);
72962 ret = sys_wait4(pid,
72963 (stat_addr ?
72964- (unsigned int __user *) &status : NULL),
72965- options, (struct rusage __user *) &r);
72966+ (unsigned int __force_user *) &status : NULL),
72967+ options, (struct rusage __force_user *) &r);
72968 set_fs (old_fs);
72969
72970 if (ret > 0) {
72971@@ -579,8 +580,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
72972 memset(&info, 0, sizeof(info));
72973
72974 set_fs(KERNEL_DS);
72975- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
72976- uru ? (struct rusage __user *)&ru : NULL);
72977+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
72978+ uru ? (struct rusage __force_user *)&ru : NULL);
72979 set_fs(old_fs);
72980
72981 if ((ret < 0) || (info.si_signo == 0))
72982@@ -714,8 +715,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
72983 oldfs = get_fs();
72984 set_fs(KERNEL_DS);
72985 err = sys_timer_settime(timer_id, flags,
72986- (struct itimerspec __user *) &newts,
72987- (struct itimerspec __user *) &oldts);
72988+ (struct itimerspec __force_user *) &newts,
72989+ (struct itimerspec __force_user *) &oldts);
72990 set_fs(oldfs);
72991 if (!err && old && put_compat_itimerspec(old, &oldts))
72992 return -EFAULT;
72993@@ -732,7 +733,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
72994 oldfs = get_fs();
72995 set_fs(KERNEL_DS);
72996 err = sys_timer_gettime(timer_id,
72997- (struct itimerspec __user *) &ts);
72998+ (struct itimerspec __force_user *) &ts);
72999 set_fs(oldfs);
73000 if (!err && put_compat_itimerspec(setting, &ts))
73001 return -EFAULT;
73002@@ -751,7 +752,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
73003 oldfs = get_fs();
73004 set_fs(KERNEL_DS);
73005 err = sys_clock_settime(which_clock,
73006- (struct timespec __user *) &ts);
73007+ (struct timespec __force_user *) &ts);
73008 set_fs(oldfs);
73009 return err;
73010 }
73011@@ -766,7 +767,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
73012 oldfs = get_fs();
73013 set_fs(KERNEL_DS);
73014 err = sys_clock_gettime(which_clock,
73015- (struct timespec __user *) &ts);
73016+ (struct timespec __force_user *) &ts);
73017 set_fs(oldfs);
73018 if (!err && put_compat_timespec(&ts, tp))
73019 return -EFAULT;
73020@@ -786,7 +787,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
73021
73022 oldfs = get_fs();
73023 set_fs(KERNEL_DS);
73024- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
73025+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
73026 set_fs(oldfs);
73027
73028 err = compat_put_timex(utp, &txc);
73029@@ -806,7 +807,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
73030 oldfs = get_fs();
73031 set_fs(KERNEL_DS);
73032 err = sys_clock_getres(which_clock,
73033- (struct timespec __user *) &ts);
73034+ (struct timespec __force_user *) &ts);
73035 set_fs(oldfs);
73036 if (!err && tp && put_compat_timespec(&ts, tp))
73037 return -EFAULT;
73038@@ -818,9 +819,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
73039 long err;
73040 mm_segment_t oldfs;
73041 struct timespec tu;
73042- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
73043+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
73044
73045- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
73046+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
73047 oldfs = get_fs();
73048 set_fs(KERNEL_DS);
73049 err = clock_nanosleep_restart(restart);
73050@@ -852,8 +853,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
73051 oldfs = get_fs();
73052 set_fs(KERNEL_DS);
73053 err = sys_clock_nanosleep(which_clock, flags,
73054- (struct timespec __user *) &in,
73055- (struct timespec __user *) &out);
73056+ (struct timespec __force_user *) &in,
73057+ (struct timespec __force_user *) &out);
73058 set_fs(oldfs);
73059
73060 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
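
Every hunk in this file is the same transformation. Reduced to one wrapper, the pattern is: widen the address limit, call the native syscall on a kernel buffer cast __force_user for sparse, restore the limit, then translate the result into the compat layout. A sketch against the 3.8-era API:

long compat_sys_clock_gettime_sketch(clockid_t which_clock,
	struct compat_timespec __user *tp)
{
	struct timespec ts;
	mm_segment_t oldfs = get_fs();
	long err;

	set_fs(KERNEL_DS);	/* kernel pointers now pass access_ok() */
	err = sys_clock_gettime(which_clock,
				(struct timespec __force_user *)&ts);
	set_fs(oldfs);

	if (!err && put_compat_timespec(&ts, tp))
		return -EFAULT;
	return err;
}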
73061diff --git a/kernel/configs.c b/kernel/configs.c
73062index 42e8fa0..9e7406b 100644
73063--- a/kernel/configs.c
73064+++ b/kernel/configs.c
73065@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
73066 struct proc_dir_entry *entry;
73067
73068 /* create the current config file */
73069+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
73070+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
73071+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
73072+ &ikconfig_file_ops);
73073+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73074+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
73075+ &ikconfig_file_ops);
73076+#endif
73077+#else
73078 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
73079 &ikconfig_file_ops);
73080+#endif
73081+
73082 if (!entry)
73083 return -ENOMEM;
73084
73085diff --git a/kernel/cred.c b/kernel/cred.c
73086index e0573a4..3874e41 100644
73087--- a/kernel/cred.c
73088+++ b/kernel/cred.c
73089@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
73090 validate_creds(cred);
73091 alter_cred_subscribers(cred, -1);
73092 put_cred(cred);
73093+
73094+#ifdef CONFIG_GRKERNSEC_SETXID
73095+ cred = (struct cred *) tsk->delayed_cred;
73096+ if (cred != NULL) {
73097+ tsk->delayed_cred = NULL;
73098+ validate_creds(cred);
73099+ alter_cred_subscribers(cred, -1);
73100+ put_cred(cred);
73101+ }
73102+#endif
73103 }
73104
73105 /**
73106@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
73107 * Always returns 0 thus allowing this function to be tail-called at the end
73108 * of, say, sys_setgid().
73109 */
73110-int commit_creds(struct cred *new)
73111+static int __commit_creds(struct cred *new)
73112 {
73113 struct task_struct *task = current;
73114 const struct cred *old = task->real_cred;
73115@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
73116
73117 get_cred(new); /* we will require a ref for the subj creds too */
73118
73119+ gr_set_role_label(task, new->uid, new->gid);
73120+
73121 /* dumpability changes */
73122 if (!uid_eq(old->euid, new->euid) ||
73123 !gid_eq(old->egid, new->egid) ||
73124@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
73125 put_cred(old);
73126 return 0;
73127 }
73128+#ifdef CONFIG_GRKERNSEC_SETXID
73129+extern int set_user(struct cred *new);
73130+
73131+void gr_delayed_cred_worker(void)
73132+{
73133+ const struct cred *new = current->delayed_cred;
73134+ struct cred *ncred;
73135+
73136+ current->delayed_cred = NULL;
73137+
73138+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
73139+ // from doing get_cred on it when queueing this
73140+ put_cred(new);
73141+ return;
73142+ } else if (new == NULL)
73143+ return;
73144+
73145+ ncred = prepare_creds();
73146+ if (!ncred)
73147+ goto die;
73148+ // uids
73149+ ncred->uid = new->uid;
73150+ ncred->euid = new->euid;
73151+ ncred->suid = new->suid;
73152+ ncred->fsuid = new->fsuid;
73153+ // gids
73154+ ncred->gid = new->gid;
73155+ ncred->egid = new->egid;
73156+ ncred->sgid = new->sgid;
73157+ ncred->fsgid = new->fsgid;
73158+ // groups
73159+ if (set_groups(ncred, new->group_info) < 0) {
73160+ abort_creds(ncred);
73161+ goto die;
73162+ }
73163+ // caps
73164+ ncred->securebits = new->securebits;
73165+ ncred->cap_inheritable = new->cap_inheritable;
73166+ ncred->cap_permitted = new->cap_permitted;
73167+ ncred->cap_effective = new->cap_effective;
73168+ ncred->cap_bset = new->cap_bset;
73169+
73170+ if (set_user(ncred)) {
73171+ abort_creds(ncred);
73172+ goto die;
73173+ }
73174+
73175+ // from doing get_cred on it when queueing this
73176+ put_cred(new);
73177+
73178+ __commit_creds(ncred);
73179+ return;
73180+die:
73181+ // from doing get_cred on it when queueing this
73182+ put_cred(new);
73183+ do_group_exit(SIGKILL);
73184+}
73185+#endif
73186+
73187+int commit_creds(struct cred *new)
73188+{
73189+#ifdef CONFIG_GRKERNSEC_SETXID
73190+ int ret;
73191+ int schedule_it = 0;
73192+ struct task_struct *t;
73193+
73194+ /* we won't get called with tasklist_lock held for writing
73195+ and interrupts disabled as the cred struct in that case is
73196+ init_cred
73197+ */
73198+ if (grsec_enable_setxid && !current_is_single_threaded() &&
73199+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
73200+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
73201+ schedule_it = 1;
73202+ }
73203+ ret = __commit_creds(new);
73204+ if (schedule_it) {
73205+ rcu_read_lock();
73206+ read_lock(&tasklist_lock);
73207+ for (t = next_thread(current); t != current;
73208+ t = next_thread(t)) {
73209+ if (t->delayed_cred == NULL) {
73210+ t->delayed_cred = get_cred(new);
73211+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
73212+ set_tsk_need_resched(t);
73213+ }
73214+ }
73215+ read_unlock(&tasklist_lock);
73216+ rcu_read_unlock();
73217+ }
73218+ return ret;
73219+#else
73220+ return __commit_creds(new);
73221+#endif
73222+}
73223+
73224 EXPORT_SYMBOL(commit_creds);
73225
73226 /**
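
Background for the delayed-cred machinery: POSIX requires set*id() to act on the whole process, which glibc implements by signalling every thread; CONFIG_GRKERNSEC_SETXID backstops that from the kernel by tagging still-privileged threads (TIF_GRSEC_SETXID) so they adopt the dropped credentials at their next reschedule. The userspace contract being enforced (run as root to see a real drop; 65534 as 'nobody' is an assumption):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static void *worker(void *arg)
{
    (void)arg;
    sleep(1);                      /* runs after the drop below */
    printf("worker euid: %d\n", (int)geteuid());
    return NULL;
}

int main(void)
{
    pthread_t t;
    if (pthread_create(&t, NULL, worker, NULL) != 0)
        return 1;
    if (seteuid(65534) != 0)       /* 'nobody' on many systems */
        perror("seteuid");
    pthread_join(t, NULL);         /* the drop is visible process-wide */
    return 0;
}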
73227diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
73228index 9a61738..c5c8f3a 100644
73229--- a/kernel/debug/debug_core.c
73230+++ b/kernel/debug/debug_core.c
73231@@ -122,7 +122,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
73232 */
73233 static atomic_t masters_in_kgdb;
73234 static atomic_t slaves_in_kgdb;
73235-static atomic_t kgdb_break_tasklet_var;
73236+static atomic_unchecked_t kgdb_break_tasklet_var;
73237 atomic_t kgdb_setting_breakpoint;
73238
73239 struct task_struct *kgdb_usethread;
73240@@ -132,7 +132,7 @@ int kgdb_single_step;
73241 static pid_t kgdb_sstep_pid;
73242
73243 /* to keep track of the CPU which is doing the single stepping*/
73244-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
73245+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
73246
73247 /*
73248 * If you are debugging a problem where roundup (the collection of
73249@@ -540,7 +540,7 @@ return_normal:
73250 * kernel will only try for the value of sstep_tries before
73251 * giving up and continuing on.
73252 */
73253- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
73254+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
73255 (kgdb_info[cpu].task &&
73256 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
73257 atomic_set(&kgdb_active, -1);
73258@@ -634,8 +634,8 @@ cpu_master_loop:
73259 }
73260
73261 kgdb_restore:
73262- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
73263- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
73264+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
73265+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
73266 if (kgdb_info[sstep_cpu].task)
73267 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
73268 else
73269@@ -887,18 +887,18 @@ static void kgdb_unregister_callbacks(void)
73270 static void kgdb_tasklet_bpt(unsigned long ing)
73271 {
73272 kgdb_breakpoint();
73273- atomic_set(&kgdb_break_tasklet_var, 0);
73274+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
73275 }
73276
73277 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
73278
73279 void kgdb_schedule_breakpoint(void)
73280 {
73281- if (atomic_read(&kgdb_break_tasklet_var) ||
73282+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
73283 atomic_read(&kgdb_active) != -1 ||
73284 atomic_read(&kgdb_setting_breakpoint))
73285 return;
73286- atomic_inc(&kgdb_break_tasklet_var);
73287+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
73288 tasklet_schedule(&kgdb_tasklet_breakpoint);
73289 }
73290 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
73291diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
73292index 8875254..7cf4928 100644
73293--- a/kernel/debug/kdb/kdb_main.c
73294+++ b/kernel/debug/kdb/kdb_main.c
73295@@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
73296 continue;
73297
73298 kdb_printf("%-20s%8u 0x%p ", mod->name,
73299- mod->core_size, (void *)mod);
73300+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
73301 #ifdef CONFIG_MODULE_UNLOAD
73302 kdb_printf("%4ld ", module_refcount(mod));
73303 #endif
73304@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
73305 kdb_printf(" (Loading)");
73306 else
73307 kdb_printf(" (Live)");
73308- kdb_printf(" 0x%p", mod->module_core);
73309+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
73310
73311 #ifdef CONFIG_MODULE_UNLOAD
73312 {
73313diff --git a/kernel/events/core.c b/kernel/events/core.c
73314index 7b6646a..3cb1135 100644
73315--- a/kernel/events/core.c
73316+++ b/kernel/events/core.c
73317@@ -182,7 +182,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
73318 return 0;
73319 }
73320
73321-static atomic64_t perf_event_id;
73322+static atomic64_unchecked_t perf_event_id;
73323
73324 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
73325 enum event_type_t event_type);
73326@@ -2677,7 +2677,7 @@ static void __perf_event_read(void *info)
73327
73328 static inline u64 perf_event_count(struct perf_event *event)
73329 {
73330- return local64_read(&event->count) + atomic64_read(&event->child_count);
73331+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
73332 }
73333
73334 static u64 perf_event_read(struct perf_event *event)
73335@@ -3007,9 +3007,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
73336 mutex_lock(&event->child_mutex);
73337 total += perf_event_read(event);
73338 *enabled += event->total_time_enabled +
73339- atomic64_read(&event->child_total_time_enabled);
73340+ atomic64_read_unchecked(&event->child_total_time_enabled);
73341 *running += event->total_time_running +
73342- atomic64_read(&event->child_total_time_running);
73343+ atomic64_read_unchecked(&event->child_total_time_running);
73344
73345 list_for_each_entry(child, &event->child_list, child_list) {
73346 total += perf_event_read(child);
73347@@ -3412,10 +3412,10 @@ void perf_event_update_userpage(struct perf_event *event)
73348 userpg->offset -= local64_read(&event->hw.prev_count);
73349
73350 userpg->time_enabled = enabled +
73351- atomic64_read(&event->child_total_time_enabled);
73352+ atomic64_read_unchecked(&event->child_total_time_enabled);
73353
73354 userpg->time_running = running +
73355- atomic64_read(&event->child_total_time_running);
73356+ atomic64_read_unchecked(&event->child_total_time_running);
73357
73358 arch_perf_update_userpage(userpg, now);
73359
73360@@ -3974,11 +3974,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
73361 values[n++] = perf_event_count(event);
73362 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
73363 values[n++] = enabled +
73364- atomic64_read(&event->child_total_time_enabled);
73365+ atomic64_read_unchecked(&event->child_total_time_enabled);
73366 }
73367 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
73368 values[n++] = running +
73369- atomic64_read(&event->child_total_time_running);
73370+ atomic64_read_unchecked(&event->child_total_time_running);
73371 }
73372 if (read_format & PERF_FORMAT_ID)
73373 values[n++] = primary_event_id(event);
73374@@ -4721,12 +4721,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
73375 * need to add enough zero bytes after the string to handle
73376 * the 64bit alignment we do later.
73377 */
73378- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
73379+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
73380 if (!buf) {
73381 name = strncpy(tmp, "//enomem", sizeof(tmp));
73382 goto got_name;
73383 }
73384- name = d_path(&file->f_path, buf, PATH_MAX);
73385+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
73386 if (IS_ERR(name)) {
73387 name = strncpy(tmp, "//toolong", sizeof(tmp));
73388 goto got_name;
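This perf_event_mmap_event() hunk is a sizing fix rather than a hardening change: the old code allocated PATH_MAX + sizeof(u64) and let d_path() use PATH_MAX of it; the new code allocates exactly PATH_MAX (which fits a kmalloc bucket) and tells d_path() to stop sizeof(u64) short, so the zeroed tail needed for 64-bit alignment is preserved at no extra allocation cost. A rough userspace analogue of the pattern, with snprintf standing in for d_path():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

enum { PATH_MAX_MODEL = 4096 };  /* assumed stand-in for PATH_MAX */

int main(void)
{
    char *buf = calloc(1, PATH_MAX_MODEL);
    if (!buf)
        return 1;
    /* the formatter sees sizeof(u64) less than the allocation, so the
     * alignment tail is always calloc'd zeroes, never formatter output */
    snprintf(buf, PATH_MAX_MODEL - sizeof(uint64_t), "%s", "/an/example/path");
    printf("%s\n", buf);
    free(buf);
    return 0;
}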
73389@@ -6165,7 +6165,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
73390 event->parent = parent_event;
73391
73392 event->ns = get_pid_ns(task_active_pid_ns(current));
73393- event->id = atomic64_inc_return(&perf_event_id);
73394+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
73395
73396 event->state = PERF_EVENT_STATE_INACTIVE;
73397
73398@@ -6790,10 +6790,10 @@ static void sync_child_event(struct perf_event *child_event,
73399 /*
73400 * Add back the child's count to the parent's count:
73401 */
73402- atomic64_add(child_val, &parent_event->child_count);
73403- atomic64_add(child_event->total_time_enabled,
73404+ atomic64_add_unchecked(child_val, &parent_event->child_count);
73405+ atomic64_add_unchecked(child_event->total_time_enabled,
73406 &parent_event->child_total_time_enabled);
73407- atomic64_add(child_event->total_time_running,
73408+ atomic64_add_unchecked(child_event->total_time_running,
73409 &parent_event->child_total_time_running);
73410
73411 /*
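All the atomic64_*_unchecked() conversions in kernel/events/core.c serve PaX's REFCOUNT feature: once plain atomics trap on overflow, counters that are mere statistics (event ids, child time totals) must be opted out via *_unchecked variants that keep ordinary wrapping semantics. A userspace model of the distinction, with assumed names (this is not the PaX implementation):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int64_t v; } atomic64_checked_t;
typedef struct { int64_t v; } atomic64_unchecked_t;

/* checked add: trap on signed overflow, as a REFCOUNT-protected
 * atomic64_add() would */
static void atomic64_add_checked(int64_t n, atomic64_checked_t *a)
{
    if (__builtin_add_overflow(a->v, n, &a->v)) {
        fprintf(stderr, "refcount overflow detected\n");
        abort();
    }
}

/* unchecked add: deliberate wrap-around, for counters where overflow
 * is harmless (the cases this patch converts) */
static void atomic64_add_unchecked(int64_t n, atomic64_unchecked_t *a)
{
    a->v = (int64_t)((uint64_t)a->v + (uint64_t)n);
}

int main(void)
{
    atomic64_checked_t refs = { 1 };
    atomic64_unchecked_t id = { INT64_MAX };

    atomic64_add_checked(1, &refs);     /* fine: no overflow */
    atomic64_add_unchecked(1, &id);     /* wraps silently, by design */
    printf("refs=%lld id=%lld\n", (long long)refs.v, (long long)id.v);
    return 0;
}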
73412diff --git a/kernel/exit.c b/kernel/exit.c
73413index b4df219..f13c02d 100644
73414--- a/kernel/exit.c
73415+++ b/kernel/exit.c
73416@@ -170,6 +170,10 @@ void release_task(struct task_struct * p)
73417 struct task_struct *leader;
73418 int zap_leader;
73419 repeat:
73420+#ifdef CONFIG_NET
73421+ gr_del_task_from_ip_table(p);
73422+#endif
73423+
73424 /* don't need to get the RCU readlock here - the process is dead and
73425 * can't be modifying its own credentials. But shut RCU-lockdep up */
73426 rcu_read_lock();
73427@@ -338,7 +342,7 @@ int allow_signal(int sig)
73428 * know it'll be handled, so that they don't get converted to
73429 * SIGKILL or just silently dropped.
73430 */
73431- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
73432+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
73433 recalc_sigpending();
73434 spin_unlock_irq(&current->sighand->siglock);
73435 return 0;
73436@@ -708,6 +712,8 @@ void do_exit(long code)
73437 struct task_struct *tsk = current;
73438 int group_dead;
73439
73440+ set_fs(USER_DS);
73441+
73442 profile_task_exit(tsk);
73443
73444 WARN_ON(blk_needs_flush_plug(tsk));
73445@@ -724,7 +730,6 @@ void do_exit(long code)
73446 * mm_release()->clear_child_tid() from writing to a user-controlled
73447 * kernel address.
73448 */
73449- set_fs(USER_DS);
73450
73451 ptrace_event(PTRACE_EVENT_EXIT, code);
73452
73453@@ -783,6 +788,9 @@ void do_exit(long code)
73454 tsk->exit_code = code;
73455 taskstats_exit(tsk, group_dead);
73456
73457+ gr_acl_handle_psacct(tsk, code);
73458+ gr_acl_handle_exit();
73459+
73460 exit_mm(tsk);
73461
73462 if (group_dead)
73463@@ -903,7 +911,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
73464 * Take down every thread in the group. This is called by fatal signals
73465 * as well as by sys_exit_group (below).
73466 */
73467-void
73468+__noreturn void
73469 do_group_exit(int exit_code)
73470 {
73471 struct signal_struct *sig = current->signal;
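Two things happen in the exit.c hunks: set_fs(USER_DS) is hoisted to the very top of do_exit(), so no later exit path (including the grsecurity hooks) runs with an elevated address-space limit, and do_group_exit() gains __noreturn. A small userspace sketch of what the annotation buys:

#include <stdnoreturn.h>
#include <stdio.h>
#include <stdlib.h>

/* Analogue of the kernel's __noreturn added to do_group_exit(): the
 * compiler may prune code after calls to this function and warns if
 * the function could actually fall through and return. */
static noreturn void group_exit_model(int code)
{
    fprintf(stderr, "exiting with %d\n", code);
    exit(code);
}

int main(void)
{
    group_exit_model(0);
    /* unreachable; no return statement needed after a noreturn call */
}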
73472diff --git a/kernel/fork.c b/kernel/fork.c
73473index c535f33..1d768f9 100644
73474--- a/kernel/fork.c
73475+++ b/kernel/fork.c
73476@@ -318,7 +318,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
73477 *stackend = STACK_END_MAGIC; /* for overflow detection */
73478
73479 #ifdef CONFIG_CC_STACKPROTECTOR
73480- tsk->stack_canary = get_random_int();
73481+ tsk->stack_canary = pax_get_random_long();
73482 #endif
73483
73484 /*
73485@@ -344,13 +344,81 @@ free_tsk:
73486 }
73487
73488 #ifdef CONFIG_MMU
73489+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
73490+{
73491+ struct vm_area_struct *tmp;
73492+ unsigned long charge;
73493+ struct mempolicy *pol;
73494+ struct file *file;
73495+
73496+ charge = 0;
73497+ if (mpnt->vm_flags & VM_ACCOUNT) {
73498+ unsigned long len = vma_pages(mpnt);
73499+
73500+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
73501+ goto fail_nomem;
73502+ charge = len;
73503+ }
73504+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
73505+ if (!tmp)
73506+ goto fail_nomem;
73507+ *tmp = *mpnt;
73508+ tmp->vm_mm = mm;
73509+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
73510+ pol = mpol_dup(vma_policy(mpnt));
73511+ if (IS_ERR(pol))
73512+ goto fail_nomem_policy;
73513+ vma_set_policy(tmp, pol);
73514+ if (anon_vma_fork(tmp, mpnt))
73515+ goto fail_nomem_anon_vma_fork;
73516+ tmp->vm_flags &= ~VM_LOCKED;
73517+ tmp->vm_next = tmp->vm_prev = NULL;
73518+ tmp->vm_mirror = NULL;
73519+ file = tmp->vm_file;
73520+ if (file) {
73521+ struct inode *inode = file->f_path.dentry->d_inode;
73522+ struct address_space *mapping = file->f_mapping;
73523+
73524+ get_file(file);
73525+ if (tmp->vm_flags & VM_DENYWRITE)
73526+ atomic_dec(&inode->i_writecount);
73527+ mutex_lock(&mapping->i_mmap_mutex);
73528+ if (tmp->vm_flags & VM_SHARED)
73529+ mapping->i_mmap_writable++;
73530+ flush_dcache_mmap_lock(mapping);
73531+ /* insert tmp into the share list, just after mpnt */
73532+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
73533+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
73534+ else
73535+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
73536+ flush_dcache_mmap_unlock(mapping);
73537+ mutex_unlock(&mapping->i_mmap_mutex);
73538+ }
73539+
73540+ /*
73541+ * Clear hugetlb-related page reserves for children. This only
73542+ * affects MAP_PRIVATE mappings. Faults generated by the child
73543+ * are not guaranteed to succeed, even if read-only
73544+ */
73545+ if (is_vm_hugetlb_page(tmp))
73546+ reset_vma_resv_huge_pages(tmp);
73547+
73548+ return tmp;
73549+
73550+fail_nomem_anon_vma_fork:
73551+ mpol_put(pol);
73552+fail_nomem_policy:
73553+ kmem_cache_free(vm_area_cachep, tmp);
73554+fail_nomem:
73555+ vm_unacct_memory(charge);
73556+ return NULL;
73557+}
73558+
73559 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
73560 {
73561 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
73562 struct rb_node **rb_link, *rb_parent;
73563 int retval;
73564- unsigned long charge;
73565- struct mempolicy *pol;
73566
73567 uprobe_start_dup_mmap();
73568 down_write(&oldmm->mmap_sem);
73569@@ -364,8 +432,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
73570 mm->locked_vm = 0;
73571 mm->mmap = NULL;
73572 mm->mmap_cache = NULL;
73573- mm->free_area_cache = oldmm->mmap_base;
73574- mm->cached_hole_size = ~0UL;
73575+ mm->free_area_cache = oldmm->free_area_cache;
73576+ mm->cached_hole_size = oldmm->cached_hole_size;
73577 mm->map_count = 0;
73578 cpumask_clear(mm_cpumask(mm));
73579 mm->mm_rb = RB_ROOT;
73580@@ -381,57 +449,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
73581
73582 prev = NULL;
73583 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
73584- struct file *file;
73585-
73586 if (mpnt->vm_flags & VM_DONTCOPY) {
73587 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
73588 -vma_pages(mpnt));
73589 continue;
73590 }
73591- charge = 0;
73592- if (mpnt->vm_flags & VM_ACCOUNT) {
73593- unsigned long len = vma_pages(mpnt);
73594-
73595- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
73596- goto fail_nomem;
73597- charge = len;
73598- }
73599- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
73600- if (!tmp)
73601- goto fail_nomem;
73602- *tmp = *mpnt;
73603- INIT_LIST_HEAD(&tmp->anon_vma_chain);
73604- pol = mpol_dup(vma_policy(mpnt));
73605- retval = PTR_ERR(pol);
73606- if (IS_ERR(pol))
73607- goto fail_nomem_policy;
73608- vma_set_policy(tmp, pol);
73609- tmp->vm_mm = mm;
73610- if (anon_vma_fork(tmp, mpnt))
73611- goto fail_nomem_anon_vma_fork;
73612- tmp->vm_flags &= ~VM_LOCKED;
73613- tmp->vm_next = tmp->vm_prev = NULL;
73614- file = tmp->vm_file;
73615- if (file) {
73616- struct inode *inode = file->f_path.dentry->d_inode;
73617- struct address_space *mapping = file->f_mapping;
73618-
73619- get_file(file);
73620- if (tmp->vm_flags & VM_DENYWRITE)
73621- atomic_dec(&inode->i_writecount);
73622- mutex_lock(&mapping->i_mmap_mutex);
73623- if (tmp->vm_flags & VM_SHARED)
73624- mapping->i_mmap_writable++;
73625- flush_dcache_mmap_lock(mapping);
73626- /* insert tmp into the share list, just after mpnt */
73627- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
73628- vma_nonlinear_insert(tmp,
73629- &mapping->i_mmap_nonlinear);
73630- else
73631- vma_interval_tree_insert_after(tmp, mpnt,
73632- &mapping->i_mmap);
73633- flush_dcache_mmap_unlock(mapping);
73634- mutex_unlock(&mapping->i_mmap_mutex);
73635+ tmp = dup_vma(mm, oldmm, mpnt);
73636+ if (!tmp) {
73637+ retval = -ENOMEM;
73638+ goto out;
73639 }
73640
73641 /*
73642@@ -463,6 +489,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
73643 if (retval)
73644 goto out;
73645 }
73646+
73647+#ifdef CONFIG_PAX_SEGMEXEC
73648+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
73649+ struct vm_area_struct *mpnt_m;
73650+
73651+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
73652+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
73653+
73654+ if (!mpnt->vm_mirror)
73655+ continue;
73656+
73657+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
73658+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
73659+ mpnt->vm_mirror = mpnt_m;
73660+ } else {
73661+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
73662+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
73663+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
73664+ mpnt->vm_mirror->vm_mirror = mpnt;
73665+ }
73666+ }
73667+ BUG_ON(mpnt_m);
73668+ }
73669+#endif
73670+
73671 /* a new mm has just been created */
73672 arch_dup_mmap(oldmm, mm);
73673 retval = 0;
73674@@ -472,14 +523,6 @@ out:
73675 up_write(&oldmm->mmap_sem);
73676 uprobe_end_dup_mmap();
73677 return retval;
73678-fail_nomem_anon_vma_fork:
73679- mpol_put(pol);
73680-fail_nomem_policy:
73681- kmem_cache_free(vm_area_cachep, tmp);
73682-fail_nomem:
73683- retval = -ENOMEM;
73684- vm_unacct_memory(charge);
73685- goto out;
73686 }
73687
73688 static inline int mm_alloc_pgd(struct mm_struct *mm)
73689@@ -694,8 +737,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
73690 return ERR_PTR(err);
73691
73692 mm = get_task_mm(task);
73693- if (mm && mm != current->mm &&
73694- !ptrace_may_access(task, mode)) {
73695+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
73696+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
73697 mmput(mm);
73698 mm = ERR_PTR(-EACCES);
73699 }
73700@@ -917,13 +960,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
73701 spin_unlock(&fs->lock);
73702 return -EAGAIN;
73703 }
73704- fs->users++;
73705+ atomic_inc(&fs->users);
73706 spin_unlock(&fs->lock);
73707 return 0;
73708 }
73709 tsk->fs = copy_fs_struct(fs);
73710 if (!tsk->fs)
73711 return -ENOMEM;
73712+ /* Carry through gr_chroot_dentry and is_chrooted instead
73713+ of recomputing them here; they were already copied when the
73714+ task struct was duplicated. This allows pivot_root to not be
73715+ treated as a chroot.
73716+ */
73717+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
73718+
73719 return 0;
73720 }
73721
73722@@ -1193,6 +1243,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
73723 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
73724 #endif
73725 retval = -EAGAIN;
73726+
73727+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
73728+
73729 if (atomic_read(&p->real_cred->user->processes) >=
73730 task_rlimit(p, RLIMIT_NPROC)) {
73731 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
73732@@ -1432,6 +1485,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
73733 goto bad_fork_free_pid;
73734 }
73735
73736+ /* Synchronizes with gr_set_acls();
73737+ we need to call this past the point of no return for fork().
73738+ */
73739+ gr_copy_label(p);
73740+
73741 if (clone_flags & CLONE_THREAD) {
73742 current->signal->nr_threads++;
73743 atomic_inc(&current->signal->live);
73744@@ -1515,6 +1573,8 @@ bad_fork_cleanup_count:
73745 bad_fork_free:
73746 free_task(p);
73747 fork_out:
73748+ gr_log_forkfail(retval);
73749+
73750 return ERR_PTR(retval);
73751 }
73752
73753@@ -1565,6 +1625,23 @@ long do_fork(unsigned long clone_flags,
73754 return -EINVAL;
73755 }
73756
73757+#ifdef CONFIG_GRKERNSEC
73758+ if (clone_flags & CLONE_NEWUSER) {
73759+ /*
73760+ * This doesn't really inspire confidence:
73761+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
73762+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
73763+ * Increases kernel attack surface in areas developers
73764+ * previously cared little about ("low importance due
73765+ * to requiring 'root' capability").
73766+ * To be removed when this code receives *proper* review.
73767+ */
73768+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
73769+ !capable(CAP_SETGID))
73770+ return -EPERM;
73771+ }
73772+#endif
73773+
73774 /*
73775 * Determine whether and which event to report to ptracer. When
73776 * called from kernel_thread or CLONE_UNTRACED is explicitly
73777@@ -1599,6 +1676,8 @@ long do_fork(unsigned long clone_flags,
73778 if (clone_flags & CLONE_PARENT_SETTID)
73779 put_user(nr, parent_tidptr);
73780
73781+ gr_handle_brute_check();
73782+
73783 if (clone_flags & CLONE_VFORK) {
73784 p->vfork_done = &vfork;
73785 init_completion(&vfork);
73786@@ -1752,7 +1831,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
73787 return 0;
73788
73789 /* don't need lock here; in the worst case we'll do useless copy */
73790- if (fs->users == 1)
73791+ if (atomic_read(&fs->users) == 1)
73792 return 0;
73793
73794 *new_fsp = copy_fs_struct(fs);
73795@@ -1866,7 +1945,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
73796 fs = current->fs;
73797 spin_lock(&fs->lock);
73798 current->fs = new_fs;
73799- if (--fs->users)
73800+ gr_set_chroot_entries(current, &current->fs->root);
73801+ if (atomic_dec_return(&fs->users))
73802 new_fs = NULL;
73803 else
73804 new_fs = fs;
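Several of the fork.c hunks convert fs_struct's users field from a lock-protected int to an atomic counter (atomic_inc / atomic_read / atomic_dec_return), letting grsecurity's chroot bookkeeping take and drop references without widening fs->lock. A self-contained C11 model of the conversion, with illustrative names:

#include <stdatomic.h>
#include <stdio.h>

struct fs_struct_model {
    atomic_int users;
};

static void fs_get(struct fs_struct_model *fs)
{
    atomic_fetch_add(&fs->users, 1);    /* was: fs->users++ under the lock */
}

static int fs_put(struct fs_struct_model *fs)
{
    /* analogue of atomic_dec_return(): non-zero means others remain */
    return atomic_fetch_sub(&fs->users, 1) - 1;
}

int main(void)
{
    struct fs_struct_model fs = { 1 };
    fs_get(&fs);
    printf("users left after one put: %d\n", fs_put(&fs));
    return 0;
}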
73805diff --git a/kernel/futex.c b/kernel/futex.c
73806index 8879430..31696f1 100644
73807--- a/kernel/futex.c
73808+++ b/kernel/futex.c
73809@@ -54,6 +54,7 @@
73810 #include <linux/mount.h>
73811 #include <linux/pagemap.h>
73812 #include <linux/syscalls.h>
73813+#include <linux/ptrace.h>
73814 #include <linux/signal.h>
73815 #include <linux/export.h>
73816 #include <linux/magic.h>
73817@@ -239,6 +240,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
73818 struct page *page, *page_head;
73819 int err, ro = 0;
73820
73821+#ifdef CONFIG_PAX_SEGMEXEC
73822+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
73823+ return -EFAULT;
73824+#endif
73825+
73826 /*
73827 * The futex address must be "naturally" aligned.
73828 */
73829@@ -2731,6 +2737,7 @@ static int __init futex_init(void)
73830 {
73831 u32 curval;
73832 int i;
73833+ mm_segment_t oldfs;
73834
73835 /*
73836 * This will fail and we want it. Some arch implementations do
73837@@ -2742,8 +2749,11 @@ static int __init futex_init(void)
73838 * implementation, the non-functional ones will return
73839 * -ENOSYS.
73840 */
73841+ oldfs = get_fs();
73842+ set_fs(USER_DS);
73843 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
73844 futex_cmpxchg_enabled = 1;
73845+ set_fs(oldfs);
73846
73847 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
73848 plist_head_init(&futex_queues[i].chain);
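The futex_init() hunk brackets the deliberate-fault probe with the classic save/override/restore idiom: stash the current address-space limit, force USER_DS so the NULL-address cmpxchg faults as intended rather than being honoured under KERNEL_DS, then restore. A generic model of the idiom with stand-in types (illustrative only, not kernel API):

#include <stdio.h>

typedef int mm_segment_t;
#define KERNEL_DS 0
#define USER_DS   1

static mm_segment_t current_fs = KERNEL_DS;

static mm_segment_t get_fs(void) { return current_fs; }
static void set_fs(mm_segment_t fs) { current_fs = fs; }

static int probe_futex_cmpxchg(void)
{
    /* stand-in for cmpxchg_futex_value_locked(&curval, NULL, 0, 0):
     * with USER_DS in force, the NULL access faults as the probe
     * expects instead of being treated as a kernel address */
    return current_fs == USER_DS ? -14 /* -EFAULT */ : 0;
}

int main(void)
{
    mm_segment_t oldfs = get_fs();
    set_fs(USER_DS);
    printf("probe returned %d\n", probe_futex_cmpxchg());
    set_fs(oldfs);
    return 0;
}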
73849diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
73850index 9b22d03..6295b62 100644
73851--- a/kernel/gcov/base.c
73852+++ b/kernel/gcov/base.c
73853@@ -102,11 +102,6 @@ void gcov_enable_events(void)
73854 }
73855
73856 #ifdef CONFIG_MODULES
73857-static inline int within(void *addr, void *start, unsigned long size)
73858-{
73859- return ((addr >= start) && (addr < start + size));
73860-}
73861-
73862 /* Update list and generate events when modules are unloaded. */
73863 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
73864 void *data)
73865@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
73866 prev = NULL;
73867 /* Remove entries located in module from linked list. */
73868 for (info = gcov_info_head; info; info = info->next) {
73869- if (within(info, mod->module_core, mod->core_size)) {
73870+ if (within_module_core_rw((unsigned long)info, mod)) {
73871 if (prev)
73872 prev->next = info->next;
73873 else
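For reference, the helper the gcov hunk deletes is a plain half-open [start, start + size) membership test; the patch replaces it with within_module_core_rw(), which knows about the split RW/RX module regions. A portable restatement (char* casts instead of the original's void* arithmetic):

#include <stdio.h>

static inline int within(void *addr, void *start, unsigned long size)
{
    return addr >= start && (char *)addr < (char *)start + size;
}

int main(void)
{
    char region[64];
    printf("%d %d\n",
           within(region + 10, region, sizeof(region)),   /* inside: 1 */
           within(region + 64, region, sizeof(region)));  /* one past end: 0 */
    return 0;
}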
73874diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
73875index cdd5607..c3fc919 100644
73876--- a/kernel/hrtimer.c
73877+++ b/kernel/hrtimer.c
73878@@ -1407,7 +1407,7 @@ void hrtimer_peek_ahead_timers(void)
73879 local_irq_restore(flags);
73880 }
73881
73882-static void run_hrtimer_softirq(struct softirq_action *h)
73883+static void run_hrtimer_softirq(void)
73884 {
73885 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
73886
73887@@ -1751,7 +1751,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
73888 return NOTIFY_OK;
73889 }
73890
73891-static struct notifier_block __cpuinitdata hrtimers_nb = {
73892+static struct notifier_block hrtimers_nb = {
73893 .notifier_call = hrtimer_cpu_notify,
73894 };
73895
73896diff --git a/kernel/jump_label.c b/kernel/jump_label.c
73897index 60f48fa..7f3a770 100644
73898--- a/kernel/jump_label.c
73899+++ b/kernel/jump_label.c
73900@@ -13,6 +13,7 @@
73901 #include <linux/sort.h>
73902 #include <linux/err.h>
73903 #include <linux/static_key.h>
73904+#include <linux/mm.h>
73905
73906 #ifdef HAVE_JUMP_LABEL
73907
73908@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
73909
73910 size = (((unsigned long)stop - (unsigned long)start)
73911 / sizeof(struct jump_entry));
73912+ pax_open_kernel();
73913 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
73914+ pax_close_kernel();
73915 }
73916
73917 static void jump_label_update(struct static_key *key, int enable);
73918@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
73919 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
73920 struct jump_entry *iter;
73921
73922+ pax_open_kernel();
73923 for (iter = iter_start; iter < iter_stop; iter++) {
73924 if (within_module_init(iter->code, mod))
73925 iter->code = 0;
73926 }
73927+ pax_close_kernel();
73928 }
73929
73930 static int
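The pax_open_kernel()/pax_close_kernel() pairs added around the jump-entry sort and invalidation exist because, under KERNEXEC, those tables live in read-only memory; writes must be bracketed by a brief unprotect/reprotect. The closest userspace analogue is an mprotect() toggle:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    char *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (page == MAP_FAILED)
        return 1;
    strcpy(page, "initial");
    mprotect(page, pagesz, PROT_READ);                  /* steady state: RO */

    mprotect(page, pagesz, PROT_READ | PROT_WRITE);     /* pax_open_kernel() */
    strcpy(page, "patched");                            /* e.g. sort the entries */
    mprotect(page, pagesz, PROT_READ);                  /* pax_close_kernel() */

    printf("%s\n", page);
    return 0;
}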
73931diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
73932index 2169fee..706ccca 100644
73933--- a/kernel/kallsyms.c
73934+++ b/kernel/kallsyms.c
73935@@ -11,6 +11,9 @@
73936 * Changed the compression method from stem compression to "table lookup"
73937 * compression (see scripts/kallsyms.c for a more complete description)
73938 */
73939+#ifdef CONFIG_GRKERNSEC_HIDESYM
73940+#define __INCLUDED_BY_HIDESYM 1
73941+#endif
73942 #include <linux/kallsyms.h>
73943 #include <linux/module.h>
73944 #include <linux/init.h>
73945@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
73946
73947 static inline int is_kernel_inittext(unsigned long addr)
73948 {
73949+ if (system_state != SYSTEM_BOOTING)
73950+ return 0;
73951+
73952 if (addr >= (unsigned long)_sinittext
73953 && addr <= (unsigned long)_einittext)
73954 return 1;
73955 return 0;
73956 }
73957
73958+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73959+#ifdef CONFIG_MODULES
73960+static inline int is_module_text(unsigned long addr)
73961+{
73962+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
73963+ return 1;
73964+
73965+ addr = ktla_ktva(addr);
73966+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
73967+}
73968+#else
73969+static inline int is_module_text(unsigned long addr)
73970+{
73971+ return 0;
73972+}
73973+#endif
73974+#endif
73975+
73976 static inline int is_kernel_text(unsigned long addr)
73977 {
73978 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
73979@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
73980
73981 static inline int is_kernel(unsigned long addr)
73982 {
73983+
73984+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
73985+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
73986+ return 1;
73987+
73988+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
73989+#else
73990 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
73991+#endif
73992+
73993 return 1;
73994 return in_gate_area_no_mm(addr);
73995 }
73996
73997 static int is_ksym_addr(unsigned long addr)
73998 {
73999+
74000+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
74001+ if (is_module_text(addr))
74002+ return 0;
74003+#endif
74004+
74005 if (all_var)
74006 return is_kernel(addr);
74007
74008@@ -470,7 +509,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
74009
74010 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
74011 {
74012- iter->name[0] = '\0';
74013 iter->nameoff = get_symbol_offset(new_pos);
74014 iter->pos = new_pos;
74015 }
74016@@ -518,6 +556,11 @@ static int s_show(struct seq_file *m, void *p)
74017 {
74018 struct kallsym_iter *iter = m->private;
74019
74020+#ifdef CONFIG_GRKERNSEC_HIDESYM
74021+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
74022+ return 0;
74023+#endif
74024+
74025 /* Some debugging symbols have no name. Ignore them. */
74026 if (!iter->name[0])
74027 return 0;
74028@@ -531,6 +574,7 @@ static int s_show(struct seq_file *m, void *p)
74029 */
74030 type = iter->exported ? toupper(iter->type) :
74031 tolower(iter->type);
74032+
74033 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
74034 type, iter->name, iter->module_name);
74035 } else
74036@@ -556,7 +600,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
74037 struct kallsym_iter *iter;
74038 int ret;
74039
74040- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
74041+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
74042 if (!iter)
74043 return -ENOMEM;
74044 reset_iter(iter, 0);
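The kallsyms_open() change pairs with the reset_iter() change: once the iterator is allocated zeroed (kzalloc), the manual and insufficient name[0] = '\0' can go, and no stale heap bytes can ever reach a /proc/kallsyms reader. calloc() is the userspace analogue; struct layout below is illustrative:

#include <stdio.h>
#include <stdlib.h>

struct kallsym_iter_model {
    unsigned long value;
    char name[128];
};

int main(void)
{
    /* was: malloc() plus clearing selected fields by hand */
    struct kallsym_iter_model *iter = calloc(1, sizeof(*iter));
    if (!iter)
        return 1;
    printf("name starts empty: \"%s\"\n", iter->name);
    free(iter);
    return 0;
}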
74045diff --git a/kernel/kcmp.c b/kernel/kcmp.c
74046index e30ac0f..3528cac 100644
74047--- a/kernel/kcmp.c
74048+++ b/kernel/kcmp.c
74049@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
74050 struct task_struct *task1, *task2;
74051 int ret;
74052
74053+#ifdef CONFIG_GRKERNSEC
74054+ return -ENOSYS;
74055+#endif
74056+
74057 rcu_read_lock();
74058
74059 /*
74060diff --git a/kernel/kexec.c b/kernel/kexec.c
74061index 5e4bd78..00c5b91 100644
74062--- a/kernel/kexec.c
74063+++ b/kernel/kexec.c
74064@@ -1045,7 +1045,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
74065 unsigned long flags)
74066 {
74067 struct compat_kexec_segment in;
74068- struct kexec_segment out, __user *ksegments;
74069+ struct kexec_segment out;
74070+ struct kexec_segment __user *ksegments;
74071 unsigned long i, result;
74072
74073 /* Don't allow clients that don't understand the native
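The kexec hunk is purely declarative hygiene: address-space annotations such as __user bind per declarator, so a single line declaring both a plain struct and a __user pointer is easy to misread and easy to get wrong when edited. A sketch with a stub standing in for the kernel's sparse annotation (this is not kernel code):

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

struct kexec_segment_model { void *buf; unsigned long bufsz; };

/* before: struct kexec_segment_model out, __user *ksegments; */
static struct kexec_segment_model out;                 /* kernel-side copy */
static struct kexec_segment_model __user *ksegments;   /* userspace pointer */

int main(void)
{
    out.bufsz = 0;
    ksegments = 0;
    return (int)out.bufsz;
}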
74074diff --git a/kernel/kmod.c b/kernel/kmod.c
74075index 0023a87..9c0c068 100644
74076--- a/kernel/kmod.c
74077+++ b/kernel/kmod.c
74078@@ -74,7 +74,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
74079 kfree(info->argv);
74080 }
74081
74082-static int call_modprobe(char *module_name, int wait)
74083+static int call_modprobe(char *module_name, char *module_param, int wait)
74084 {
74085 static char *envp[] = {
74086 "HOME=/",
74087@@ -83,7 +83,7 @@ static int call_modprobe(char *module_name, int wait)
74088 NULL
74089 };
74090
74091- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
74092+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
74093 if (!argv)
74094 goto out;
74095
74096@@ -95,7 +95,8 @@ static int call_modprobe(char *module_name, int wait)
74097 argv[1] = "-q";
74098 argv[2] = "--";
74099 argv[3] = module_name; /* check free_modprobe_argv() */
74100- argv[4] = NULL;
74101+ argv[4] = module_param;
74102+ argv[5] = NULL;
74103
74104 return call_usermodehelper_fns(modprobe_path, argv, envp,
74105 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL);
74106@@ -120,9 +121,8 @@ out:
74107 * If module auto-loading support is disabled then this function
74108 * becomes a no-operation.
74109 */
74110-int __request_module(bool wait, const char *fmt, ...)
74111+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
74112 {
74113- va_list args;
74114 char module_name[MODULE_NAME_LEN];
74115 unsigned int max_modprobes;
74116 int ret;
74117@@ -130,9 +130,7 @@ int __request_module(bool wait, const char *fmt, ...)
74118 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
74119 static int kmod_loop_msg;
74120
74121- va_start(args, fmt);
74122- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
74123- va_end(args);
74124+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
74125 if (ret >= MODULE_NAME_LEN)
74126 return -ENAMETOOLONG;
74127
74128@@ -140,6 +138,20 @@ int __request_module(bool wait, const char *fmt, ...)
74129 if (ret)
74130 return ret;
74131
74132+#ifdef CONFIG_GRKERNSEC_MODHARDEN
74133+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
74134+ /* hack to work around consolekit/udisks stupidity */
74135+ read_lock(&tasklist_lock);
74136+ if (!strcmp(current->comm, "mount") &&
74137+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
74138+ read_unlock(&tasklist_lock);
74139+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
74140+ return -EPERM;
74141+ }
74142+ read_unlock(&tasklist_lock);
74143+ }
74144+#endif
74145+
74146 /* If modprobe needs a service that is in a module, we get a recursive
74147 * loop. Limit the number of running kmod threads to max_threads/2 or
74148 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
74149@@ -168,11 +180,52 @@ int __request_module(bool wait, const char *fmt, ...)
74150
74151 trace_module_request(module_name, wait, _RET_IP_);
74152
74153- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
74154+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
74155
74156 atomic_dec(&kmod_concurrent);
74157 return ret;
74158 }
74159+
74160+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
74161+{
74162+ va_list args;
74163+ int ret;
74164+
74165+ va_start(args, fmt);
74166+ ret = ____request_module(wait, module_param, fmt, args);
74167+ va_end(args);
74168+
74169+ return ret;
74170+}
74171+
74172+int __request_module(bool wait, const char *fmt, ...)
74173+{
74174+ va_list args;
74175+ int ret;
74176+
74177+#ifdef CONFIG_GRKERNSEC_MODHARDEN
74178+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
74179+ char module_param[MODULE_NAME_LEN];
74180+
74181+ memset(module_param, 0, sizeof(module_param));
74182+
74183+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
74184+
74185+ va_start(args, fmt);
74186+ ret = ____request_module(wait, module_param, fmt, args);
74187+ va_end(args);
74188+
74189+ return ret;
74190+ }
74191+#endif
74192+
74193+ va_start(args, fmt);
74194+ ret = ____request_module(wait, NULL, fmt, args);
74195+ va_end(args);
74196+
74197+ return ret;
74198+}
74199+
74200 EXPORT_SYMBOL(__request_module);
74201 #endif /* CONFIG_MODULES */
74202
74203@@ -283,7 +336,7 @@ static int wait_for_helper(void *data)
74204 *
74205 * Thus the __user pointer cast is valid here.
74206 */
74207- sys_wait4(pid, (int __user *)&ret, 0, NULL);
74208+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
74209
74210 /*
74211 * If ret is 0, either ____call_usermodehelper failed and the
74212@@ -635,7 +688,7 @@ EXPORT_SYMBOL(call_usermodehelper_fns);
74213 static int proc_cap_handler(struct ctl_table *table, int write,
74214 void __user *buffer, size_t *lenp, loff_t *ppos)
74215 {
74216- struct ctl_table t;
74217+ ctl_table_no_const t;
74218 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
74219 kernel_cap_t new_cap;
74220 int err, i;
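The kmod.c rework is the classic v-function split: __request_module() keeps its varargs signature but now forwards a va_list to a private ____request_module() core, so the new ___request_module() entry point can thread an extra modprobe argument (the grsec_modharden_* tag) through one shared path. The pattern in miniature, with illustrative names:

#include <stdarg.h>
#include <stdio.h>

static int request_core(const char *tag, const char *fmt, va_list ap)
{
    char name[64];
    if (vsnprintf(name, sizeof(name), fmt, ap) >= (int)sizeof(name))
        return -1;                          /* -ENAMETOOLONG analogue */
    printf("modprobe -q -- %s %s\n", name, tag ? tag : "");
    return 0;
}

static int request_module_tagged(const char *tag, const char *fmt, ...)
{
    va_list ap;
    int ret;

    va_start(ap, fmt);
    ret = request_core(tag, fmt, ap);       /* all wrappers share this core */
    va_end(ap);
    return ret;
}

int main(void)
{
    return request_module_tagged("grsec_modharden_normal1000_", "fs-%s", "ext4");
}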
74221diff --git a/kernel/kprobes.c b/kernel/kprobes.c
74222index 098f396..fe85ff1 100644
74223--- a/kernel/kprobes.c
74224+++ b/kernel/kprobes.c
74225@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
74226 * kernel image and loaded module images reside. This is required
74227 * so x86_64 can correctly handle the %rip-relative fixups.
74228 */
74229- kip->insns = module_alloc(PAGE_SIZE);
74230+ kip->insns = module_alloc_exec(PAGE_SIZE);
74231 if (!kip->insns) {
74232 kfree(kip);
74233 return NULL;
74234@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
74235 */
74236 if (!list_is_singular(&kip->list)) {
74237 list_del(&kip->list);
74238- module_free(NULL, kip->insns);
74239+ module_free_exec(NULL, kip->insns);
74240 kfree(kip);
74241 }
74242 return 1;
74243@@ -2063,7 +2063,7 @@ static int __init init_kprobes(void)
74244 {
74245 int i, err = 0;
74246 unsigned long offset = 0, size = 0;
74247- char *modname, namebuf[128];
74248+ char *modname, namebuf[KSYM_NAME_LEN];
74249 const char *symbol_name;
74250 void *addr;
74251 struct kprobe_blackpoint *kb;
74252@@ -2148,11 +2148,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
74253 kprobe_type = "k";
74254
74255 if (sym)
74256- seq_printf(pi, "%p %s %s+0x%x %s ",
74257+ seq_printf(pi, "%pK %s %s+0x%x %s ",
74258 p->addr, kprobe_type, sym, offset,
74259 (modname ? modname : " "));
74260 else
74261- seq_printf(pi, "%p %s %p ",
74262+ seq_printf(pi, "%pK %s %pK ",
74263 p->addr, kprobe_type, p->addr);
74264
74265 if (!pp)
74266@@ -2190,7 +2190,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
74267 const char *sym = NULL;
74268 unsigned int i = *(loff_t *) v;
74269 unsigned long offset = 0;
74270- char *modname, namebuf[128];
74271+ char *modname, namebuf[KSYM_NAME_LEN];
74272
74273 head = &kprobe_table[i];
74274 preempt_disable();
74275diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
74276index 6ada93c..dce7d5d 100644
74277--- a/kernel/ksysfs.c
74278+++ b/kernel/ksysfs.c
74279@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
74280 {
74281 if (count+1 > UEVENT_HELPER_PATH_LEN)
74282 return -ENOENT;
74283+ if (!capable(CAP_SYS_ADMIN))
74284+ return -EPERM;
74285 memcpy(uevent_helper, buf, count);
74286 uevent_helper[count] = '\0';
74287 if (count && uevent_helper[count-1] == '\n')
74288@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
74289 return count;
74290 }
74291
74292-static struct bin_attribute notes_attr = {
74293+static bin_attribute_no_const notes_attr __read_only = {
74294 .attr = {
74295 .name = "notes",
74296 .mode = S_IRUGO,
74297diff --git a/kernel/lockdep.c b/kernel/lockdep.c
74298index 7981e5b..7f2105c 100644
74299--- a/kernel/lockdep.c
74300+++ b/kernel/lockdep.c
74301@@ -590,6 +590,10 @@ static int static_obj(void *obj)
74302 end = (unsigned long) &_end,
74303 addr = (unsigned long) obj;
74304
74305+#ifdef CONFIG_PAX_KERNEXEC
74306+ start = ktla_ktva(start);
74307+#endif
74308+
74309 /*
74310 * static variable?
74311 */
74312@@ -730,6 +734,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
74313 if (!static_obj(lock->key)) {
74314 debug_locks_off();
74315 printk("INFO: trying to register non-static key.\n");
74316+ printk("lock:%pS key:%pS.\n", lock, lock->key);
74317 printk("the code is fine but needs lockdep annotation.\n");
74318 printk("turning off the locking correctness validator.\n");
74319 dump_stack();
74320@@ -3078,7 +3083,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
74321 if (!class)
74322 return 0;
74323 }
74324- atomic_inc((atomic_t *)&class->ops);
74325+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
74326 if (very_verbose(class)) {
74327 printk("\nacquire class [%p] %s", class->key, class->name);
74328 if (class->name_version > 1)
74329diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
74330index b2c71c5..7b88d63 100644
74331--- a/kernel/lockdep_proc.c
74332+++ b/kernel/lockdep_proc.c
74333@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
74334 return 0;
74335 }
74336
74337- seq_printf(m, "%p", class->key);
74338+ seq_printf(m, "%pK", class->key);
74339 #ifdef CONFIG_DEBUG_LOCKDEP
74340 seq_printf(m, " OPS:%8ld", class->ops);
74341 #endif
74342@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
74343
74344 list_for_each_entry(entry, &class->locks_after, entry) {
74345 if (entry->distance == 1) {
74346- seq_printf(m, " -> [%p] ", entry->class->key);
74347+ seq_printf(m, " -> [%pK] ", entry->class->key);
74348 print_name(m, entry->class);
74349 seq_puts(m, "\n");
74350 }
74351@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
74352 if (!class->key)
74353 continue;
74354
74355- seq_printf(m, "[%p] ", class->key);
74356+ seq_printf(m, "[%pK] ", class->key);
74357 print_name(m, class);
74358 seq_puts(m, "\n");
74359 }
74360@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
74361 if (!i)
74362 seq_line(m, '-', 40-namelen, namelen);
74363
74364- snprintf(ip, sizeof(ip), "[<%p>]",
74365+ snprintf(ip, sizeof(ip), "[<%pK>]",
74366 (void *)class->contention_point[i]);
74367 seq_printf(m, "%40s %14lu %29s %pS\n",
74368 name, stats->contention_point[i],
74369@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
74370 if (!i)
74371 seq_line(m, '-', 40-namelen, namelen);
74372
74373- snprintf(ip, sizeof(ip), "[<%p>]",
74374+ snprintf(ip, sizeof(ip), "[<%pK>]",
74375 (void *)class->contending_point[i]);
74376 seq_printf(m, "%40s %14lu %29s %pS\n",
74377 name, stats->contending_point[i],
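The %p to %pK conversions in the kprobes and lockdep /proc output all serve one goal: %pK consults kptr_restrict and the reader's credentials, printing a censored value to unprivileged readers so these files stop leaking kernel layout. A userspace model of that semantic (illustrative only):

#include <stdio.h>

static void print_kptr(const void *p, int reader_is_privileged)
{
    if (reader_is_privileged)
        printf("%p\n", p);
    else
        printf("%p\n", (void *)0);  /* what %pK shows when restricted */
}

int main(void)
{
    int x;
    print_kptr(&x, 0);
    print_kptr(&x, 1);
    return 0;
}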
74378diff --git a/kernel/module.c b/kernel/module.c
74379index eab0827..f488603 100644
74380--- a/kernel/module.c
74381+++ b/kernel/module.c
74382@@ -61,6 +61,7 @@
74383 #include <linux/pfn.h>
74384 #include <linux/bsearch.h>
74385 #include <linux/fips.h>
74386+#include <linux/grsecurity.h>
74387 #include <uapi/linux/module.h>
74388 #include "module-internal.h"
74389
74390@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
74391
74392 /* Bounds of module allocation, for speeding __module_address.
74393 * Protected by module_mutex. */
74394-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
74395+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
74396+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
74397
74398 int register_module_notifier(struct notifier_block * nb)
74399 {
74400@@ -322,7 +324,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
74401 return true;
74402
74403 list_for_each_entry_rcu(mod, &modules, list) {
74404- struct symsearch arr[] = {
74405+ struct symsearch modarr[] = {
74406 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
74407 NOT_GPL_ONLY, false },
74408 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
74409@@ -347,7 +349,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
74410 if (mod->state == MODULE_STATE_UNFORMED)
74411 continue;
74412
74413- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
74414+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
74415 return true;
74416 }
74417 return false;
74418@@ -484,7 +486,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
74419 static int percpu_modalloc(struct module *mod,
74420 unsigned long size, unsigned long align)
74421 {
74422- if (align > PAGE_SIZE) {
74423+ if (align-1 >= PAGE_SIZE) {
74424 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
74425 mod->name, align, PAGE_SIZE);
74426 align = PAGE_SIZE;
74427@@ -1088,7 +1090,7 @@ struct module_attribute module_uevent =
74428 static ssize_t show_coresize(struct module_attribute *mattr,
74429 struct module_kobject *mk, char *buffer)
74430 {
74431- return sprintf(buffer, "%u\n", mk->mod->core_size);
74432+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
74433 }
74434
74435 static struct module_attribute modinfo_coresize =
74436@@ -1097,7 +1099,7 @@ static struct module_attribute modinfo_coresize =
74437 static ssize_t show_initsize(struct module_attribute *mattr,
74438 struct module_kobject *mk, char *buffer)
74439 {
74440- return sprintf(buffer, "%u\n", mk->mod->init_size);
74441+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
74442 }
74443
74444 static struct module_attribute modinfo_initsize =
74445@@ -1311,7 +1313,7 @@ resolve_symbol_wait(struct module *mod,
74446 */
74447 #ifdef CONFIG_SYSFS
74448
74449-#ifdef CONFIG_KALLSYMS
74450+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
74451 static inline bool sect_empty(const Elf_Shdr *sect)
74452 {
74453 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
74454@@ -1451,7 +1453,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
74455 {
74456 unsigned int notes, loaded, i;
74457 struct module_notes_attrs *notes_attrs;
74458- struct bin_attribute *nattr;
74459+ bin_attribute_no_const *nattr;
74460
74461 /* failed to create section attributes, so can't create notes */
74462 if (!mod->sect_attrs)
74463@@ -1563,7 +1565,7 @@ static void del_usage_links(struct module *mod)
74464 static int module_add_modinfo_attrs(struct module *mod)
74465 {
74466 struct module_attribute *attr;
74467- struct module_attribute *temp_attr;
74468+ module_attribute_no_const *temp_attr;
74469 int error = 0;
74470 int i;
74471
74472@@ -1777,21 +1779,21 @@ static void set_section_ro_nx(void *base,
74473
74474 static void unset_module_core_ro_nx(struct module *mod)
74475 {
74476- set_page_attributes(mod->module_core + mod->core_text_size,
74477- mod->module_core + mod->core_size,
74478+ set_page_attributes(mod->module_core_rw,
74479+ mod->module_core_rw + mod->core_size_rw,
74480 set_memory_x);
74481- set_page_attributes(mod->module_core,
74482- mod->module_core + mod->core_ro_size,
74483+ set_page_attributes(mod->module_core_rx,
74484+ mod->module_core_rx + mod->core_size_rx,
74485 set_memory_rw);
74486 }
74487
74488 static void unset_module_init_ro_nx(struct module *mod)
74489 {
74490- set_page_attributes(mod->module_init + mod->init_text_size,
74491- mod->module_init + mod->init_size,
74492+ set_page_attributes(mod->module_init_rw,
74493+ mod->module_init_rw + mod->init_size_rw,
74494 set_memory_x);
74495- set_page_attributes(mod->module_init,
74496- mod->module_init + mod->init_ro_size,
74497+ set_page_attributes(mod->module_init_rx,
74498+ mod->module_init_rx + mod->init_size_rx,
74499 set_memory_rw);
74500 }
74501
74502@@ -1804,14 +1806,14 @@ void set_all_modules_text_rw(void)
74503 list_for_each_entry_rcu(mod, &modules, list) {
74504 if (mod->state == MODULE_STATE_UNFORMED)
74505 continue;
74506- if ((mod->module_core) && (mod->core_text_size)) {
74507- set_page_attributes(mod->module_core,
74508- mod->module_core + mod->core_text_size,
74509+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
74510+ set_page_attributes(mod->module_core_rx,
74511+ mod->module_core_rx + mod->core_size_rx,
74512 set_memory_rw);
74513 }
74514- if ((mod->module_init) && (mod->init_text_size)) {
74515- set_page_attributes(mod->module_init,
74516- mod->module_init + mod->init_text_size,
74517+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
74518+ set_page_attributes(mod->module_init_rx,
74519+ mod->module_init_rx + mod->init_size_rx,
74520 set_memory_rw);
74521 }
74522 }
74523@@ -1827,14 +1829,14 @@ void set_all_modules_text_ro(void)
74524 list_for_each_entry_rcu(mod, &modules, list) {
74525 if (mod->state == MODULE_STATE_UNFORMED)
74526 continue;
74527- if ((mod->module_core) && (mod->core_text_size)) {
74528- set_page_attributes(mod->module_core,
74529- mod->module_core + mod->core_text_size,
74530+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
74531+ set_page_attributes(mod->module_core_rx,
74532+ mod->module_core_rx + mod->core_size_rx,
74533 set_memory_ro);
74534 }
74535- if ((mod->module_init) && (mod->init_text_size)) {
74536- set_page_attributes(mod->module_init,
74537- mod->module_init + mod->init_text_size,
74538+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
74539+ set_page_attributes(mod->module_init_rx,
74540+ mod->module_init_rx + mod->init_size_rx,
74541 set_memory_ro);
74542 }
74543 }
74544@@ -1880,16 +1882,19 @@ static void free_module(struct module *mod)
74545
74546 /* This may be NULL, but that's OK */
74547 unset_module_init_ro_nx(mod);
74548- module_free(mod, mod->module_init);
74549+ module_free(mod, mod->module_init_rw);
74550+ module_free_exec(mod, mod->module_init_rx);
74551 kfree(mod->args);
74552 percpu_modfree(mod);
74553
74554 /* Free lock-classes: */
74555- lockdep_free_key_range(mod->module_core, mod->core_size);
74556+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
74557+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
74558
74559 /* Finally, free the core (containing the module structure) */
74560 unset_module_core_ro_nx(mod);
74561- module_free(mod, mod->module_core);
74562+ module_free_exec(mod, mod->module_core_rx);
74563+ module_free(mod, mod->module_core_rw);
74564
74565 #ifdef CONFIG_MPU
74566 update_protections(current->mm);
74567@@ -1959,9 +1964,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
74568 int ret = 0;
74569 const struct kernel_symbol *ksym;
74570
74571+#ifdef CONFIG_GRKERNSEC_MODHARDEN
74572+ int is_fs_load = 0;
74573+ int register_filesystem_found = 0;
74574+ char *p;
74575+
74576+ p = strstr(mod->args, "grsec_modharden_fs");
74577+ if (p) {
74578+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
74579+ /* copy \0 as well */
74580+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
74581+ is_fs_load = 1;
74582+ }
74583+#endif
74584+
74585 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
74586 const char *name = info->strtab + sym[i].st_name;
74587
74588+#ifdef CONFIG_GRKERNSEC_MODHARDEN
74589+ /* it's a real shame this will never get ripped and copied
74590+ upstream! ;(
74591+ */
74592+ if (is_fs_load && !strcmp(name, "register_filesystem"))
74593+ register_filesystem_found = 1;
74594+#endif
74595+
74596 switch (sym[i].st_shndx) {
74597 case SHN_COMMON:
74598 /* We compiled with -fno-common. These are not
74599@@ -1982,7 +2009,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
74600 ksym = resolve_symbol_wait(mod, info, name);
74601 /* Ok if resolved. */
74602 if (ksym && !IS_ERR(ksym)) {
74603+ pax_open_kernel();
74604 sym[i].st_value = ksym->value;
74605+ pax_close_kernel();
74606 break;
74607 }
74608
74609@@ -2001,11 +2030,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
74610 secbase = (unsigned long)mod_percpu(mod);
74611 else
74612 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
74613+ pax_open_kernel();
74614 sym[i].st_value += secbase;
74615+ pax_close_kernel();
74616 break;
74617 }
74618 }
74619
74620+#ifdef CONFIG_GRKERNSEC_MODHARDEN
74621+ if (is_fs_load && !register_filesystem_found) {
74622+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
74623+ ret = -EPERM;
74624+ }
74625+#endif
74626+
74627 return ret;
74628 }
74629
74630@@ -2089,22 +2127,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
74631 || s->sh_entsize != ~0UL
74632 || strstarts(sname, ".init"))
74633 continue;
74634- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
74635+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
74636+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
74637+ else
74638+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
74639 pr_debug("\t%s\n", sname);
74640 }
74641- switch (m) {
74642- case 0: /* executable */
74643- mod->core_size = debug_align(mod->core_size);
74644- mod->core_text_size = mod->core_size;
74645- break;
74646- case 1: /* RO: text and ro-data */
74647- mod->core_size = debug_align(mod->core_size);
74648- mod->core_ro_size = mod->core_size;
74649- break;
74650- case 3: /* whole core */
74651- mod->core_size = debug_align(mod->core_size);
74652- break;
74653- }
74654 }
74655
74656 pr_debug("Init section allocation order:\n");
74657@@ -2118,23 +2146,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
74658 || s->sh_entsize != ~0UL
74659 || !strstarts(sname, ".init"))
74660 continue;
74661- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
74662- | INIT_OFFSET_MASK);
74663+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
74664+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
74665+ else
74666+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
74667+ s->sh_entsize |= INIT_OFFSET_MASK;
74668 pr_debug("\t%s\n", sname);
74669 }
74670- switch (m) {
74671- case 0: /* executable */
74672- mod->init_size = debug_align(mod->init_size);
74673- mod->init_text_size = mod->init_size;
74674- break;
74675- case 1: /* RO: text and ro-data */
74676- mod->init_size = debug_align(mod->init_size);
74677- mod->init_ro_size = mod->init_size;
74678- break;
74679- case 3: /* whole init */
74680- mod->init_size = debug_align(mod->init_size);
74681- break;
74682- }
74683 }
74684 }
74685
74686@@ -2306,7 +2324,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
74687
74688 /* Put symbol section at end of init part of module. */
74689 symsect->sh_flags |= SHF_ALLOC;
74690- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
74691+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
74692 info->index.sym) | INIT_OFFSET_MASK;
74693 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
74694
74695@@ -2323,13 +2341,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
74696 }
74697
74698 /* Append room for core symbols at end of core part. */
74699- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
74700- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
74701- mod->core_size += strtab_size;
74702+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
74703+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
74704+ mod->core_size_rx += strtab_size;
74705
74706 /* Put string table section at end of init part of module. */
74707 strsect->sh_flags |= SHF_ALLOC;
74708- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
74709+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
74710 info->index.str) | INIT_OFFSET_MASK;
74711 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
74712 }
74713@@ -2347,12 +2365,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
74714 /* Make sure we get permanent strtab: don't use info->strtab. */
74715 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
74716
74717+ pax_open_kernel();
74718+
74719 /* Set types up while we still have access to sections. */
74720 for (i = 0; i < mod->num_symtab; i++)
74721 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
74722
74723- mod->core_symtab = dst = mod->module_core + info->symoffs;
74724- mod->core_strtab = s = mod->module_core + info->stroffs;
74725+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
74726+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
74727 src = mod->symtab;
74728 for (ndst = i = 0; i < mod->num_symtab; i++) {
74729 if (i == 0 ||
74730@@ -2364,6 +2384,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
74731 }
74732 }
74733 mod->core_num_syms = ndst;
74734+
74735+ pax_close_kernel();
74736 }
74737 #else
74738 static inline void layout_symtab(struct module *mod, struct load_info *info)
74739@@ -2397,17 +2419,33 @@ void * __weak module_alloc(unsigned long size)
74740 return vmalloc_exec(size);
74741 }
74742
74743-static void *module_alloc_update_bounds(unsigned long size)
74744+static void *module_alloc_update_bounds_rw(unsigned long size)
74745 {
74746 void *ret = module_alloc(size);
74747
74748 if (ret) {
74749 mutex_lock(&module_mutex);
74750 /* Update module bounds. */
74751- if ((unsigned long)ret < module_addr_min)
74752- module_addr_min = (unsigned long)ret;
74753- if ((unsigned long)ret + size > module_addr_max)
74754- module_addr_max = (unsigned long)ret + size;
74755+ if ((unsigned long)ret < module_addr_min_rw)
74756+ module_addr_min_rw = (unsigned long)ret;
74757+ if ((unsigned long)ret + size > module_addr_max_rw)
74758+ module_addr_max_rw = (unsigned long)ret + size;
74759+ mutex_unlock(&module_mutex);
74760+ }
74761+ return ret;
74762+}
74763+
74764+static void *module_alloc_update_bounds_rx(unsigned long size)
74765+{
74766+ void *ret = module_alloc_exec(size);
74767+
74768+ if (ret) {
74769+ mutex_lock(&module_mutex);
74770+ /* Update module bounds. */
74771+ if ((unsigned long)ret < module_addr_min_rx)
74772+ module_addr_min_rx = (unsigned long)ret;
74773+ if ((unsigned long)ret + size > module_addr_max_rx)
74774+ module_addr_max_rx = (unsigned long)ret + size;
74775 mutex_unlock(&module_mutex);
74776 }
74777 return ret;
74778@@ -2683,8 +2721,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
74779 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
74780 {
74781 const char *modmagic = get_modinfo(info, "vermagic");
74782+ const char *license = get_modinfo(info, "license");
74783 int err;
74784
74785+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
74786+ if (!license || !license_is_gpl_compatible(license))
74787+ return -ENOEXEC;
74788+#endif
74789+
74790 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
74791 modmagic = NULL;
74792
74793@@ -2710,7 +2754,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
74794 }
74795
74796 /* Set up license info based on the info section */
74797- set_license(mod, get_modinfo(info, "license"));
74798+ set_license(mod, license);
74799
74800 return 0;
74801 }
74802@@ -2804,7 +2848,7 @@ static int move_module(struct module *mod, struct load_info *info)
74803 void *ptr;
74804
74805 /* Do the allocs. */
74806- ptr = module_alloc_update_bounds(mod->core_size);
74807+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
74808 /*
74809 * The pointer to this block is stored in the module structure
74810 * which is inside the block. Just mark it as not being a
74811@@ -2814,11 +2858,11 @@ static int move_module(struct module *mod, struct load_info *info)
74812 if (!ptr)
74813 return -ENOMEM;
74814
74815- memset(ptr, 0, mod->core_size);
74816- mod->module_core = ptr;
74817+ memset(ptr, 0, mod->core_size_rw);
74818+ mod->module_core_rw = ptr;
74819
74820- if (mod->init_size) {
74821- ptr = module_alloc_update_bounds(mod->init_size);
74822+ if (mod->init_size_rw) {
74823+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
74824 /*
74825 * The pointer to this block is stored in the module structure
74826 * which is inside the block. This block doesn't need to be
74827@@ -2827,13 +2871,45 @@ static int move_module(struct module *mod, struct load_info *info)
74828 */
74829 kmemleak_ignore(ptr);
74830 if (!ptr) {
74831- module_free(mod, mod->module_core);
74832+ module_free(mod, mod->module_core_rw);
74833 return -ENOMEM;
74834 }
74835- memset(ptr, 0, mod->init_size);
74836- mod->module_init = ptr;
74837+ memset(ptr, 0, mod->init_size_rw);
74838+ mod->module_init_rw = ptr;
74839 } else
74840- mod->module_init = NULL;
74841+ mod->module_init_rw = NULL;
74842+
74843+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
74844+ kmemleak_not_leak(ptr);
74845+ if (!ptr) {
74846+ if (mod->module_init_rw)
74847+ module_free(mod, mod->module_init_rw);
74848+ module_free(mod, mod->module_core_rw);
74849+ return -ENOMEM;
74850+ }
74851+
74852+ pax_open_kernel();
74853+ memset(ptr, 0, mod->core_size_rx);
74854+ pax_close_kernel();
74855+ mod->module_core_rx = ptr;
74856+
74857+ if (mod->init_size_rx) {
74858+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
74859+ kmemleak_ignore(ptr);
74860+ if (!ptr && mod->init_size_rx) {
74861+ module_free_exec(mod, mod->module_core_rx);
74862+ if (mod->module_init_rw)
74863+ module_free(mod, mod->module_init_rw);
74864+ module_free(mod, mod->module_core_rw);
74865+ return -ENOMEM;
74866+ }
74867+
74868+ pax_open_kernel();
74869+ memset(ptr, 0, mod->init_size_rx);
74870+ pax_close_kernel();
74871+ mod->module_init_rx = ptr;
74872+ } else
74873+ mod->module_init_rx = NULL;
74874
74875 /* Transfer each section which specifies SHF_ALLOC */
74876 pr_debug("final section addresses:\n");
74877@@ -2844,16 +2920,45 @@ static int move_module(struct module *mod, struct load_info *info)
74878 if (!(shdr->sh_flags & SHF_ALLOC))
74879 continue;
74880
74881- if (shdr->sh_entsize & INIT_OFFSET_MASK)
74882- dest = mod->module_init
74883- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
74884- else
74885- dest = mod->module_core + shdr->sh_entsize;
74886+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
74887+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
74888+ dest = mod->module_init_rw
74889+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
74890+ else
74891+ dest = mod->module_init_rx
74892+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
74893+ } else {
74894+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
74895+ dest = mod->module_core_rw + shdr->sh_entsize;
74896+ else
74897+ dest = mod->module_core_rx + shdr->sh_entsize;
74898+ }
74899+
74900+ if (shdr->sh_type != SHT_NOBITS) {
74901+
74902+#ifdef CONFIG_PAX_KERNEXEC
74903+#ifdef CONFIG_X86_64
74904+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
74905+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
74906+#endif
74907+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
74908+ pax_open_kernel();
74909+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
74910+ pax_close_kernel();
74911+ } else
74912+#endif
74913
74914- if (shdr->sh_type != SHT_NOBITS)
74915 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
74916+ }
74917 /* Update sh_addr to point to copy in image. */
74918- shdr->sh_addr = (unsigned long)dest;
74919+
74920+#ifdef CONFIG_PAX_KERNEXEC
74921+ if (shdr->sh_flags & SHF_EXECINSTR)
74922+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
74923+ else
74924+#endif
74925+
74926+ shdr->sh_addr = (unsigned long)dest;
74927 pr_debug("\t0x%lx %s\n",
74928 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
74929 }
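The move_module() rework above carves module memory into two allocations: one stays writable for data, the other is filled while briefly writable (under the pax_open_kernel() bracket) and then lives read+execute. A userspace analogue with mmap/mprotect:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t rw_size = 4096, rx_size = 4096;

    void *core_rw = mmap(NULL, rw_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    void *core_rx = mmap(NULL, rx_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (core_rw == MAP_FAILED || core_rx == MAP_FAILED)
        return 1;

    /* "relocation" phase: both regions writable while sections are
     * copied in (the kernel brackets this with pax_open_kernel()) */
    memset(core_rw, 0, rw_size);
    memset(core_rx, 0, rx_size);

    /* steady state: text becomes R-X, data stays RW but never X */
    if (mprotect(core_rx, rx_size, PROT_READ | PROT_EXEC) != 0)
        return 1;

    printf("rw=%p rx=%p\n", core_rw, core_rx);
    return 0;
}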
74930@@ -2908,12 +3013,12 @@ static void flush_module_icache(const struct module *mod)
74931 * Do it before processing of module parameters, so the module
74932 * can provide parameter accessor functions of its own.
74933 */
74934- if (mod->module_init)
74935- flush_icache_range((unsigned long)mod->module_init,
74936- (unsigned long)mod->module_init
74937- + mod->init_size);
74938- flush_icache_range((unsigned long)mod->module_core,
74939- (unsigned long)mod->module_core + mod->core_size);
74940+ if (mod->module_init_rx)
74941+ flush_icache_range((unsigned long)mod->module_init_rx,
74942+ (unsigned long)mod->module_init_rx
74943+ + mod->init_size_rx);
74944+ flush_icache_range((unsigned long)mod->module_core_rx,
74945+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
74946
74947 set_fs(old_fs);
74948 }
74949@@ -2983,8 +3088,10 @@ out:
74950 static void module_deallocate(struct module *mod, struct load_info *info)
74951 {
74952 percpu_modfree(mod);
74953- module_free(mod, mod->module_init);
74954- module_free(mod, mod->module_core);
74955+ module_free_exec(mod, mod->module_init_rx);
74956+ module_free_exec(mod, mod->module_core_rx);
74957+ module_free(mod, mod->module_init_rw);
74958+ module_free(mod, mod->module_core_rw);
74959 }
74960
74961 int __weak module_finalize(const Elf_Ehdr *hdr,
74962@@ -2997,7 +3104,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
74963 static int post_relocation(struct module *mod, const struct load_info *info)
74964 {
74965 /* Sort exception table now relocations are done. */
74966+ pax_open_kernel();
74967 sort_extable(mod->extable, mod->extable + mod->num_exentries);
74968+ pax_close_kernel();
74969
74970 /* Copy relocated percpu area over. */
74971 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
74972@@ -3051,16 +3160,16 @@ static int do_init_module(struct module *mod)
74973 MODULE_STATE_COMING, mod);
74974
74975 /* Set RO and NX regions for core */
74976- set_section_ro_nx(mod->module_core,
74977- mod->core_text_size,
74978- mod->core_ro_size,
74979- mod->core_size);
74980+ set_section_ro_nx(mod->module_core_rx,
74981+ mod->core_size_rx,
74982+ mod->core_size_rx,
74983+ mod->core_size_rx);
74984
74985 /* Set RO and NX regions for init */
74986- set_section_ro_nx(mod->module_init,
74987- mod->init_text_size,
74988- mod->init_ro_size,
74989- mod->init_size);
74990+ set_section_ro_nx(mod->module_init_rx,
74991+ mod->init_size_rx,
74992+ mod->init_size_rx,
74993+ mod->init_size_rx);
74994
74995 do_mod_ctors(mod);
74996 /* Start the module */
74997@@ -3122,11 +3231,12 @@ static int do_init_module(struct module *mod)
74998 mod->strtab = mod->core_strtab;
74999 #endif
75000 unset_module_init_ro_nx(mod);
75001- module_free(mod, mod->module_init);
75002- mod->module_init = NULL;
75003- mod->init_size = 0;
75004- mod->init_ro_size = 0;
75005- mod->init_text_size = 0;
75006+ module_free(mod, mod->module_init_rw);
75007+ module_free_exec(mod, mod->module_init_rx);
75008+ mod->module_init_rw = NULL;
75009+ mod->module_init_rx = NULL;
75010+ mod->init_size_rw = 0;
75011+ mod->init_size_rx = 0;
75012 mutex_unlock(&module_mutex);
75013 wake_up_all(&module_wq);
75014
75015@@ -3209,9 +3319,38 @@ again:
75016 if (err)
75017 goto free_unload;
75018
75019+ /* Now copy in args */
75020+ mod->args = strndup_user(uargs, ~0UL >> 1);
75021+ if (IS_ERR(mod->args)) {
75022+ err = PTR_ERR(mod->args);
75023+ goto free_unload;
75024+ }
75025+
75026 /* Set up MODINFO_ATTR fields */
75027 setup_modinfo(mod, info);
75028
75029+#ifdef CONFIG_GRKERNSEC_MODHARDEN
75030+ {
75031+ char *p, *p2;
75032+
75033+ if (strstr(mod->args, "grsec_modharden_netdev")) {
75034+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
75035+ err = -EPERM;
75036+ goto free_modinfo;
75037+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
75038+ p += sizeof("grsec_modharden_normal") - 1;
75039+ p2 = strchr(p, '_');
75040+ if (p2) {
75041+ *p2 = '\0';
75042+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
75043+ *p2 = '_';
75044+ }
75045+ err = -EPERM;
75046+ goto free_modinfo;
75047+ }
75048+ }
75049+#endif
75050+
75051 /* Fix up syms, so that st_value is a pointer to location. */
75052 err = simplify_symbols(mod, info);
75053 if (err < 0)
75054@@ -3227,13 +3366,6 @@ again:
75055
75056 flush_module_icache(mod);
75057
75058- /* Now copy in args */
75059- mod->args = strndup_user(uargs, ~0UL >> 1);
75060- if (IS_ERR(mod->args)) {
75061- err = PTR_ERR(mod->args);
75062- goto free_arch_cleanup;
75063- }
75064-
75065 dynamic_debug_setup(info->debug, info->num_debug);
75066
75067 mutex_lock(&module_mutex);
75068@@ -3278,11 +3410,10 @@ again:
75069 mutex_unlock(&module_mutex);
75070 dynamic_debug_remove(info->debug);
75071 synchronize_sched();
75072- kfree(mod->args);
75073- free_arch_cleanup:
75074 module_arch_cleanup(mod);
75075 free_modinfo:
75076 free_modinfo(mod);
75077+ kfree(mod->args);
75078 free_unload:
75079 module_unload_free(mod);
75080 unlink_mod:
75081@@ -3365,10 +3496,16 @@ static const char *get_ksymbol(struct module *mod,
75082 unsigned long nextval;
75083
75084 /* At worst, next value is at end of module */
75085- if (within_module_init(addr, mod))
75086- nextval = (unsigned long)mod->module_init+mod->init_text_size;
75087+ if (within_module_init_rx(addr, mod))
75088+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
75089+ else if (within_module_init_rw(addr, mod))
75090+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
75091+ else if (within_module_core_rx(addr, mod))
75092+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
75093+ else if (within_module_core_rw(addr, mod))
75094+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
75095 else
75096- nextval = (unsigned long)mod->module_core+mod->core_text_size;
75097+ return NULL;
75098
75099 /* Scan for closest preceding symbol, and next symbol. (ELF
75100 starts real symbols at 1). */
75101@@ -3621,7 +3758,7 @@ static int m_show(struct seq_file *m, void *p)
75102 return 0;
75103
75104 seq_printf(m, "%s %u",
75105- mod->name, mod->init_size + mod->core_size);
75106+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
75107 print_unload_info(m, mod);
75108
75109 /* Informative for users. */
75110@@ -3630,7 +3767,7 @@ static int m_show(struct seq_file *m, void *p)
75111 mod->state == MODULE_STATE_COMING ? "Loading":
75112 "Live");
75113 /* Used by oprofile and other similar tools. */
75114- seq_printf(m, " 0x%pK", mod->module_core);
75115+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
75116
75117 /* Taints info */
75118 if (mod->taints)
75119@@ -3666,7 +3803,17 @@ static const struct file_operations proc_modules_operations = {
75120
75121 static int __init proc_modules_init(void)
75122 {
75123+#ifndef CONFIG_GRKERNSEC_HIDESYM
75124+#ifdef CONFIG_GRKERNSEC_PROC_USER
75125+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
75126+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75127+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
75128+#else
75129 proc_create("modules", 0, NULL, &proc_modules_operations);
75130+#endif
75131+#else
75132+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
75133+#endif
75134 return 0;
75135 }
75136 module_init(proc_modules_init);
75137@@ -3727,14 +3874,14 @@ struct module *__module_address(unsigned long addr)
75138 {
75139 struct module *mod;
75140
75141- if (addr < module_addr_min || addr > module_addr_max)
75142+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
75143+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
75144 return NULL;
75145
75146 list_for_each_entry_rcu(mod, &modules, list) {
75147 if (mod->state == MODULE_STATE_UNFORMED)
75148 continue;
75149- if (within_module_core(addr, mod)
75150- || within_module_init(addr, mod))
75151+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
75152 return mod;
75153 }
75154 return NULL;
75155@@ -3769,11 +3916,20 @@ bool is_module_text_address(unsigned long addr)
75156 */
75157 struct module *__module_text_address(unsigned long addr)
75158 {
75159- struct module *mod = __module_address(addr);
75160+ struct module *mod;
75161+
75162+#ifdef CONFIG_X86_32
75163+ addr = ktla_ktva(addr);
75164+#endif
75165+
75166+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
75167+ return NULL;
75168+
75169+ mod = __module_address(addr);
75170+
75171 if (mod) {
75172 /* Make sure it's within the text section. */
75173- if (!within(addr, mod->module_init, mod->init_text_size)
75174- && !within(addr, mod->module_core, mod->core_text_size))
75175+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
75176 mod = NULL;
75177 }
75178 return mod;
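
The kernel/module.c hunks above all stem from one change: each module's memory is split into an executable read-only region (module_{core,init}_rx) and a writable non-executable region (module_{core,init}_rw), and a section's destination is decided by its ELF flags. A compilable user-space sketch of just that placement rule follows; pick_region and the region strings are illustrative stand-ins, not the kernel API, though the flag values match the ELF spec:

#include <stdio.h>

/* Stand-in ELF section flag values (these match the ELF spec). */
#define SHF_WRITE     0x1
#define SHF_ALLOC     0x2
#define SHF_EXECINSTR 0x4

/* Same predicate as the hunk above: writable sections, and sections
 * that are not allocated at all, go to the rw region; read-only
 * allocated sections (text, rodata) go to the rx region. */
static const char *pick_region(unsigned long sh_flags)
{
    if ((sh_flags & SHF_WRITE) || !(sh_flags & SHF_ALLOC))
        return "module_core_rw";
    return "module_core_rx";
}

int main(void)
{
    printf(".text   -> %s\n", pick_region(SHF_ALLOC | SHF_EXECINSTR));
    printf(".rodata -> %s\n", pick_region(SHF_ALLOC));
    printf(".data   -> %s\n", pick_region(SHF_ALLOC | SHF_WRITE));
    return 0;
}

Because text and rodata share one rx region, the patch can map that region read-only and executable as a whole, which is why set_section_ro_nx above is called with core_size_rx for all three size arguments.
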
75179diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
75180index 7e3443f..b2a1e6b 100644
75181--- a/kernel/mutex-debug.c
75182+++ b/kernel/mutex-debug.c
75183@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
75184 }
75185
75186 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
75187- struct thread_info *ti)
75188+ struct task_struct *task)
75189 {
75190 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
75191
75192 /* Mark the current thread as blocked on the lock: */
75193- ti->task->blocked_on = waiter;
75194+ task->blocked_on = waiter;
75195 }
75196
75197 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
75198- struct thread_info *ti)
75199+ struct task_struct *task)
75200 {
75201 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
75202- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
75203- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
75204- ti->task->blocked_on = NULL;
75205+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
75206+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
75207+ task->blocked_on = NULL;
75208
75209 list_del_init(&waiter->list);
75210 waiter->task = NULL;
75211diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
75212index 0799fd3..d06ae3b 100644
75213--- a/kernel/mutex-debug.h
75214+++ b/kernel/mutex-debug.h
75215@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
75216 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
75217 extern void debug_mutex_add_waiter(struct mutex *lock,
75218 struct mutex_waiter *waiter,
75219- struct thread_info *ti);
75220+ struct task_struct *task);
75221 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
75222- struct thread_info *ti);
75223+ struct task_struct *task);
75224 extern void debug_mutex_unlock(struct mutex *lock);
75225 extern void debug_mutex_init(struct mutex *lock, const char *name,
75226 struct lock_class_key *key);
75227diff --git a/kernel/mutex.c b/kernel/mutex.c
75228index a307cc9..27fd2e9 100644
75229--- a/kernel/mutex.c
75230+++ b/kernel/mutex.c
75231@@ -198,7 +198,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
75232 spin_lock_mutex(&lock->wait_lock, flags);
75233
75234 debug_mutex_lock_common(lock, &waiter);
75235- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
75236+ debug_mutex_add_waiter(lock, &waiter, task);
75237
75238 /* add waiting tasks to the end of the waitqueue (FIFO): */
75239 list_add_tail(&waiter.list, &lock->wait_list);
75240@@ -227,8 +227,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
75241 * TASK_UNINTERRUPTIBLE case.)
75242 */
75243 if (unlikely(signal_pending_state(state, task))) {
75244- mutex_remove_waiter(lock, &waiter,
75245- task_thread_info(task));
75246+ mutex_remove_waiter(lock, &waiter, task);
75247 mutex_release(&lock->dep_map, 1, ip);
75248 spin_unlock_mutex(&lock->wait_lock, flags);
75249
75250@@ -247,7 +246,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
75251 done:
75252 lock_acquired(&lock->dep_map, ip);
75253 /* got the lock - rejoice! */
75254- mutex_remove_waiter(lock, &waiter, current_thread_info());
75255+ mutex_remove_waiter(lock, &waiter, task);
75256 mutex_set_owner(lock);
75257
75258 /* set it to 0 if there are no waiters left: */
75259diff --git a/kernel/notifier.c b/kernel/notifier.c
75260index 2d5cc4c..d9ea600 100644
75261--- a/kernel/notifier.c
75262+++ b/kernel/notifier.c
75263@@ -5,6 +5,7 @@
75264 #include <linux/rcupdate.h>
75265 #include <linux/vmalloc.h>
75266 #include <linux/reboot.h>
75267+#include <linux/mm.h>
75268
75269 /*
75270 * Notifier list for kernel code which wants to be called
75271@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
75272 while ((*nl) != NULL) {
75273 if (n->priority > (*nl)->priority)
75274 break;
75275- nl = &((*nl)->next);
75276+ nl = (struct notifier_block **)&((*nl)->next);
75277 }
75278- n->next = *nl;
75279+ pax_open_kernel();
75280+ *(const void **)&n->next = *nl;
75281 rcu_assign_pointer(*nl, n);
75282+ pax_close_kernel();
75283 return 0;
75284 }
75285
75286@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
75287 return 0;
75288 if (n->priority > (*nl)->priority)
75289 break;
75290- nl = &((*nl)->next);
75291+ nl = (struct notifier_block **)&((*nl)->next);
75292 }
75293- n->next = *nl;
75294+ pax_open_kernel();
75295+ *(const void **)&n->next = *nl;
75296 rcu_assign_pointer(*nl, n);
75297+ pax_close_kernel();
75298 return 0;
75299 }
75300
75301@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
75302 {
75303 while ((*nl) != NULL) {
75304 if ((*nl) == n) {
75305+ pax_open_kernel();
75306 rcu_assign_pointer(*nl, n->next);
75307+ pax_close_kernel();
75308 return 0;
75309 }
75310- nl = &((*nl)->next);
75311+ nl = (struct notifier_block **)&((*nl)->next);
75312 }
75313 return -ENOENT;
75314 }
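
The notifier hunks assume notifier_block structures may live in read-only memory, so every list-pointer update is bracketed by pax_open_kernel()/pax_close_kernel(); on x86 those briefly lift the kernel's write protection. The closest user-space analogue is toggling page protections, sketched below as an assumption-laden miniature (open_kernel/close_kernel are illustrative stand-ins, not the PaX primitives, and 4 KiB pages are assumed):

#include <stdio.h>
#include <sys/mman.h>

#define PAGE 4096                       /* assumes 4 KiB pages */

static int *protected_data;             /* normally mapped read-only */

static void open_kernel(void)  { mprotect(protected_data, PAGE, PROT_READ | PROT_WRITE); }
static void close_kernel(void) { mprotect(protected_data, PAGE, PROT_READ); }

int main(void)
{
    protected_data = mmap(NULL, PAGE, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (protected_data == MAP_FAILED)
        return 1;

    *protected_data = 41;
    close_kernel();                     /* from here on, stray writes fault */

    open_kernel();                      /* deliberate update: briefly writable */
    (*protected_data)++;
    close_kernel();

    printf("%d\n", *protected_data);
    return 0;
}

The window between open and close is kept as short as possible, so an attacker-controlled write elsewhere in the kernel still faults.
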
75315diff --git a/kernel/panic.c b/kernel/panic.c
75316index e1b2822..5edc1d9 100644
75317--- a/kernel/panic.c
75318+++ b/kernel/panic.c
75319@@ -410,7 +410,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
75320 const char *board;
75321
75322 printk(KERN_WARNING "------------[ cut here ]------------\n");
75323- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
75324+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
75325 board = dmi_get_system_info(DMI_PRODUCT_NAME);
75326 if (board)
75327 printk(KERN_WARNING "Hardware name: %s\n", board);
75328@@ -465,7 +465,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
75329 */
75330 void __stack_chk_fail(void)
75331 {
75332- panic("stack-protector: Kernel stack is corrupted in: %p\n",
75333+ dump_stack();
75334+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
75335 __builtin_return_address(0));
75336 }
75337 EXPORT_SYMBOL(__stack_chk_fail);
75338diff --git a/kernel/pid.c b/kernel/pid.c
75339index f2c6a68..4922d97 100644
75340--- a/kernel/pid.c
75341+++ b/kernel/pid.c
75342@@ -33,6 +33,7 @@
75343 #include <linux/rculist.h>
75344 #include <linux/bootmem.h>
75345 #include <linux/hash.h>
75346+#include <linux/security.h>
75347 #include <linux/pid_namespace.h>
75348 #include <linux/init_task.h>
75349 #include <linux/syscalls.h>
75350@@ -46,7 +47,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
75351
75352 int pid_max = PID_MAX_DEFAULT;
75353
75354-#define RESERVED_PIDS 300
75355+#define RESERVED_PIDS 500
75356
75357 int pid_max_min = RESERVED_PIDS + 1;
75358 int pid_max_max = PID_MAX_LIMIT;
75359@@ -441,10 +442,18 @@ EXPORT_SYMBOL(pid_task);
75360 */
75361 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
75362 {
75363+ struct task_struct *task;
75364+
75365 rcu_lockdep_assert(rcu_read_lock_held(),
75366 "find_task_by_pid_ns() needs rcu_read_lock()"
75367 " protection");
75368- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
75369+
75370+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
75371+
75372+ if (gr_pid_is_chrooted(task))
75373+ return NULL;
75374+
75375+ return task;
75376 }
75377
75378 struct task_struct *find_task_by_vpid(pid_t vnr)
75379@@ -452,6 +461,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
75380 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
75381 }
75382
75383+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
75384+{
75385+ rcu_lockdep_assert(rcu_read_lock_held(),
75386+ "find_task_by_pid_ns() needs rcu_read_lock()"
75387+ " protection");
75388+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
75389+}
75390+
75391 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
75392 {
75393 struct pid *pid;
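
The pid.c hunk shows a recurring grsecurity pattern: the normal lookup (find_task_by_pid_ns) filters out tasks the caller should not see, here tasks outside the caller's chroot, while a *_unrestricted variant preserves the raw lookup for trusted internal callers. A toy model of that split (all names and the in_caller_chroot field are illustrative, not the kernel's):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct task { int pid; bool in_caller_chroot; };

static struct task tasks[] = { { 1, false }, { 100, true } };

static struct task *find_task_raw(int pid)      /* the *_unrestricted path */
{
    for (size_t i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
        if (tasks[i].pid == pid)
            return &tasks[i];
    return NULL;
}

static struct task *find_task(int pid)          /* the filtered path */
{
    struct task *t = find_task_raw(pid);

    if (t && !t->in_caller_chroot)
        return NULL;                            /* hide it, as if absent */
    return t;
}

int main(void)
{
    printf("pid 1:   %s\n", find_task(1)   ? "visible" : "hidden");
    printf("pid 100: %s\n", find_task(100) ? "visible" : "hidden");
    return 0;
}
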
75394diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
75395index c1c3dc1..bbeaf31 100644
75396--- a/kernel/pid_namespace.c
75397+++ b/kernel/pid_namespace.c
75398@@ -248,7 +248,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
75399 void __user *buffer, size_t *lenp, loff_t *ppos)
75400 {
75401 struct pid_namespace *pid_ns = task_active_pid_ns(current);
75402- struct ctl_table tmp = *table;
75403+ ctl_table_no_const tmp = *table;
75404
75405 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
75406 return -EPERM;
75407diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
75408index 942ca27..111e609 100644
75409--- a/kernel/posix-cpu-timers.c
75410+++ b/kernel/posix-cpu-timers.c
75411@@ -1576,14 +1576,14 @@ struct k_clock clock_posix_cpu = {
75412
75413 static __init int init_posix_cpu_timers(void)
75414 {
75415- struct k_clock process = {
75416+ static struct k_clock process = {
75417 .clock_getres = process_cpu_clock_getres,
75418 .clock_get = process_cpu_clock_get,
75419 .timer_create = process_cpu_timer_create,
75420 .nsleep = process_cpu_nsleep,
75421 .nsleep_restart = process_cpu_nsleep_restart,
75422 };
75423- struct k_clock thread = {
75424+ static struct k_clock thread = {
75425 .clock_getres = thread_cpu_clock_getres,
75426 .clock_get = thread_cpu_clock_get,
75427 .timer_create = thread_cpu_timer_create,
75428diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
75429index e885be1..380fe76 100644
75430--- a/kernel/posix-timers.c
75431+++ b/kernel/posix-timers.c
75432@@ -43,6 +43,7 @@
75433 #include <linux/idr.h>
75434 #include <linux/posix-clock.h>
75435 #include <linux/posix-timers.h>
75436+#include <linux/grsecurity.h>
75437 #include <linux/syscalls.h>
75438 #include <linux/wait.h>
75439 #include <linux/workqueue.h>
75440@@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
75441 * which we beg off on and pass to do_sys_settimeofday().
75442 */
75443
75444-static struct k_clock posix_clocks[MAX_CLOCKS];
75445+static struct k_clock *posix_clocks[MAX_CLOCKS];
75446
75447 /*
75448 * These ones are defined below.
75449@@ -227,7 +228,7 @@ static int posix_get_boottime(const clockid_t which_clock, struct timespec *tp)
75450 */
75451 static __init int init_posix_timers(void)
75452 {
75453- struct k_clock clock_realtime = {
75454+ static struct k_clock clock_realtime = {
75455 .clock_getres = hrtimer_get_res,
75456 .clock_get = posix_clock_realtime_get,
75457 .clock_set = posix_clock_realtime_set,
75458@@ -239,7 +240,7 @@ static __init int init_posix_timers(void)
75459 .timer_get = common_timer_get,
75460 .timer_del = common_timer_del,
75461 };
75462- struct k_clock clock_monotonic = {
75463+ static struct k_clock clock_monotonic = {
75464 .clock_getres = hrtimer_get_res,
75465 .clock_get = posix_ktime_get_ts,
75466 .nsleep = common_nsleep,
75467@@ -249,19 +250,19 @@ static __init int init_posix_timers(void)
75468 .timer_get = common_timer_get,
75469 .timer_del = common_timer_del,
75470 };
75471- struct k_clock clock_monotonic_raw = {
75472+ static struct k_clock clock_monotonic_raw = {
75473 .clock_getres = hrtimer_get_res,
75474 .clock_get = posix_get_monotonic_raw,
75475 };
75476- struct k_clock clock_realtime_coarse = {
75477+ static struct k_clock clock_realtime_coarse = {
75478 .clock_getres = posix_get_coarse_res,
75479 .clock_get = posix_get_realtime_coarse,
75480 };
75481- struct k_clock clock_monotonic_coarse = {
75482+ static struct k_clock clock_monotonic_coarse = {
75483 .clock_getres = posix_get_coarse_res,
75484 .clock_get = posix_get_monotonic_coarse,
75485 };
75486- struct k_clock clock_boottime = {
75487+ static struct k_clock clock_boottime = {
75488 .clock_getres = hrtimer_get_res,
75489 .clock_get = posix_get_boottime,
75490 .nsleep = common_nsleep,
75491@@ -473,7 +474,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
75492 return;
75493 }
75494
75495- posix_clocks[clock_id] = *new_clock;
75496+ posix_clocks[clock_id] = new_clock;
75497 }
75498 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
75499
75500@@ -519,9 +520,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
75501 return (id & CLOCKFD_MASK) == CLOCKFD ?
75502 &clock_posix_dynamic : &clock_posix_cpu;
75503
75504- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
75505+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
75506 return NULL;
75507- return &posix_clocks[id];
75508+ return posix_clocks[id];
75509 }
75510
75511 static int common_timer_create(struct k_itimer *new_timer)
75512@@ -966,6 +967,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
75513 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
75514 return -EFAULT;
75515
75516+ /* Only the CLOCK_REALTIME clock can be set; all other clocks
75517+ have their clock_set fptr set to a nosettime dummy function.
75518+ CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
75519+ call common_clock_set, which calls do_sys_settimeofday, which
75520+ we hook.
75521+ */
75522+
75523 return kc->clock_set(which_clock, &new_tp);
75524 }
75525
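
The posix-timers.c conversion replaces a writable array of k_clock structures with an array of pointers to static instances, so the function-pointer tables themselves can sit in read-only memory; clockid_to_kclock then gains the extra NULL check seen above. A compilable miniature of the same registration scheme (clock_ops, register_clock and friends are illustrative names):

#include <stdio.h>

struct clock_ops {
    const char *(*name)(void);
};

static const char *realtime_name(void) { return "CLOCK_REALTIME"; }

/* Static instance: its function pointers can live in rodata. */
static const struct clock_ops clock_realtime = { .name = realtime_name };

#define MAX_CLOCKS 16
static const struct clock_ops *clocks[MAX_CLOCKS];   /* pointers, not copies */

static void register_clock(int id, const struct clock_ops *ops)
{
    if (id >= 0 && id < MAX_CLOCKS)
        clocks[id] = ops;
}

int main(void)
{
    register_clock(0, &clock_realtime);
    if (clocks[0] && clocks[0]->name)    /* mirrors the added NULL check */
        printf("%s\n", clocks[0]->name());
    return 0;
}

Registering a pointer instead of copying the whole struct is precisely what lets the underlying object stay static and unmodifiable after boot.
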
75526diff --git a/kernel/power/process.c b/kernel/power/process.c
75527index d5a258b..4271191 100644
75528--- a/kernel/power/process.c
75529+++ b/kernel/power/process.c
75530@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
75531 u64 elapsed_csecs64;
75532 unsigned int elapsed_csecs;
75533 bool wakeup = false;
75534+ bool timedout = false;
75535
75536 do_gettimeofday(&start);
75537
75538@@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
75539
75540 while (true) {
75541 todo = 0;
75542+ if (time_after(jiffies, end_time))
75543+ timedout = true;
75544 read_lock(&tasklist_lock);
75545 do_each_thread(g, p) {
75546 if (p == current || !freeze_task(p))
75547 continue;
75548
75549- if (!freezer_should_skip(p))
75550+ if (!freezer_should_skip(p)) {
75551 todo++;
75552+ if (timedout) {
75553+ printk(KERN_ERR "Task refusing to freeze:\n");
75554+ sched_show_task(p);
75555+ }
75556+ }
75557 } while_each_thread(g, p);
75558 read_unlock(&tasklist_lock);
75559
75560@@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
75561 todo += wq_busy;
75562 }
75563
75564- if (!todo || time_after(jiffies, end_time))
75565+ if (!todo || timedout)
75566 break;
75567
75568 if (pm_wakeup_pending()) {
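
The freezer change computes the timeout once per scan and keeps walking the task list after it fires, so every task refusing to freeze is reported before the loop bails out, instead of the old code silently giving up on the first timed-out pass. The control flow, reduced to a deterministic toy (task_stuck and the pass counter stand in for freeze_task and the jiffies deadline):

#include <stdbool.h>
#include <stdio.h>

static bool task_stuck(int i) { return i == 2 || i == 5; }

int main(void)
{
    int passes = 0, todo;

    while (true) {
        bool timedout = ++passes > 3;   /* stands in for time_after(jiffies, end_time) */

        todo = 0;
        for (int i = 0; i < 8; i++) {
            if (!task_stuck(i))
                continue;
            todo++;                     /* still counted, so we keep trying */
            if (timedout)
                printf("task %d refusing to freeze\n", i);
        }
        if (!todo || timedout)          /* report everyone, then give up */
            break;
    }
    return todo ? 1 : 0;
}
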
75569diff --git a/kernel/printk.c b/kernel/printk.c
75570index 267ce78..2487112 100644
75571--- a/kernel/printk.c
75572+++ b/kernel/printk.c
75573@@ -609,11 +609,17 @@ static unsigned int devkmsg_poll(struct file *file, poll_table *wait)
75574 return ret;
75575 }
75576
75577+static int check_syslog_permissions(int type, bool from_file);
75578+
75579 static int devkmsg_open(struct inode *inode, struct file *file)
75580 {
75581 struct devkmsg_user *user;
75582 int err;
75583
75584+ err = check_syslog_permissions(SYSLOG_ACTION_OPEN, SYSLOG_FROM_FILE);
75585+ if (err)
75586+ return err;
75587+
75588 /* write-only does not need any file context */
75589 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
75590 return 0;
75591@@ -822,7 +828,7 @@ static int syslog_action_restricted(int type)
75592 if (dmesg_restrict)
75593 return 1;
75594 /* Unless restricted, we allow "read all" and "get buffer size" for everybody */
75595- return type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
75596+ return type != SYSLOG_ACTION_OPEN && type != SYSLOG_ACTION_READ_ALL && type != SYSLOG_ACTION_SIZE_BUFFER;
75597 }
75598
75599 static int check_syslog_permissions(int type, bool from_file)
75600@@ -834,6 +840,11 @@ static int check_syslog_permissions(int type, bool from_file)
75601 if (from_file && type != SYSLOG_ACTION_OPEN)
75602 return 0;
75603
75604+#ifdef CONFIG_GRKERNSEC_DMESG
75605+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
75606+ return -EPERM;
75607+#endif
75608+
75609 if (syslog_action_restricted(type)) {
75610 if (capable(CAP_SYSLOG))
75611 return 0;
75612diff --git a/kernel/profile.c b/kernel/profile.c
75613index 1f39181..86093471 100644
75614--- a/kernel/profile.c
75615+++ b/kernel/profile.c
75616@@ -40,7 +40,7 @@ struct profile_hit {
75617 /* Oprofile timer tick hook */
75618 static int (*timer_hook)(struct pt_regs *) __read_mostly;
75619
75620-static atomic_t *prof_buffer;
75621+static atomic_unchecked_t *prof_buffer;
75622 static unsigned long prof_len, prof_shift;
75623
75624 int prof_on __read_mostly;
75625@@ -282,7 +282,7 @@ static void profile_flip_buffers(void)
75626 hits[i].pc = 0;
75627 continue;
75628 }
75629- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
75630+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
75631 hits[i].hits = hits[i].pc = 0;
75632 }
75633 }
75634@@ -343,9 +343,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
75635 * Add the current hit(s) and flush the write-queue out
75636 * to the global buffer:
75637 */
75638- atomic_add(nr_hits, &prof_buffer[pc]);
75639+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
75640 for (i = 0; i < NR_PROFILE_HIT; ++i) {
75641- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
75642+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
75643 hits[i].pc = hits[i].hits = 0;
75644 }
75645 out:
75646@@ -420,7 +420,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
75647 {
75648 unsigned long pc;
75649 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
75650- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
75651+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
75652 }
75653 #endif /* !CONFIG_SMP */
75654
75655@@ -518,7 +518,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
75656 return -EFAULT;
75657 buf++; p++; count--; read++;
75658 }
75659- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
75660+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
75661 if (copy_to_user(buf, (void *)pnt, count))
75662 return -EFAULT;
75663 read += count;
75664@@ -549,7 +549,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
75665 }
75666 #endif
75667 profile_discard_flip_buffers();
75668- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
75669+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
75670 return count;
75671 }
75672
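
The profile.c hunks show the patch's most frequent mechanical change: under PaX's overflow-checking atomic_t (the REFCOUNT feature), counters that may legitimately wrap, such as pure statistics, are switched to atomic_unchecked_t to opt out of the check. A rough user-space illustration of the distinction, not the PaX implementation (the unchecked path uses unsigned arithmetic here only to keep the wraparound well-defined in C):

#include <limits.h>
#include <stdio.h>

/* Checked add: detect signed overflow and refuse to wrap. */
static int checked_add(int *v, int i)
{
    int r;

    if (__builtin_add_overflow(*v, i, &r)) {   /* GCC >= 5 / Clang builtin */
        fprintf(stderr, "refcount overflow detected\n");
        return *v;                             /* saturate instead of wrapping */
    }
    return *v = r;
}

/* Unchecked add: wraparound is acceptable for pure statistics. */
static unsigned int unchecked_add(unsigned int *v, unsigned int i)
{
    return *v += i;
}

int main(void)
{
    int ref = INT_MAX;
    unsigned int hits = 0xffffffffu;

    checked_add(&ref, 1);      /* reports and saturates */
    unchecked_add(&hits, 1);   /* silently wraps to 0, by design */
    printf("ref=%d hits=%u\n", ref, hits);
    return 0;
}

The same substitution recurs throughout the rcutorture, rcutree and rtmutex-tester hunks below, always on counters whose wraparound is harmless.
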
75673diff --git a/kernel/ptrace.c b/kernel/ptrace.c
75674index 6cbeaae..363c48a 100644
75675--- a/kernel/ptrace.c
75676+++ b/kernel/ptrace.c
75677@@ -324,7 +324,7 @@ static int ptrace_attach(struct task_struct *task, long request,
75678 if (seize)
75679 flags |= PT_SEIZED;
75680 rcu_read_lock();
75681- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
75682+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
75683 flags |= PT_PTRACE_CAP;
75684 rcu_read_unlock();
75685 task->ptrace = flags;
75686@@ -535,7 +535,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
75687 break;
75688 return -EIO;
75689 }
75690- if (copy_to_user(dst, buf, retval))
75691+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
75692 return -EFAULT;
75693 copied += retval;
75694 src += retval;
75695@@ -720,7 +720,7 @@ int ptrace_request(struct task_struct *child, long request,
75696 bool seized = child->ptrace & PT_SEIZED;
75697 int ret = -EIO;
75698 siginfo_t siginfo, *si;
75699- void __user *datavp = (void __user *) data;
75700+ void __user *datavp = (__force void __user *) data;
75701 unsigned long __user *datalp = datavp;
75702 unsigned long flags;
75703
75704@@ -922,14 +922,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
75705 goto out;
75706 }
75707
75708+ if (gr_handle_ptrace(child, request)) {
75709+ ret = -EPERM;
75710+ goto out_put_task_struct;
75711+ }
75712+
75713 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
75714 ret = ptrace_attach(child, request, addr, data);
75715 /*
75716 * Some architectures need to do book-keeping after
75717 * a ptrace attach.
75718 */
75719- if (!ret)
75720+ if (!ret) {
75721 arch_ptrace_attach(child);
75722+ gr_audit_ptrace(child);
75723+ }
75724 goto out_put_task_struct;
75725 }
75726
75727@@ -957,7 +964,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
75728 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
75729 if (copied != sizeof(tmp))
75730 return -EIO;
75731- return put_user(tmp, (unsigned long __user *)data);
75732+ return put_user(tmp, (__force unsigned long __user *)data);
75733 }
75734
75735 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
75736@@ -1067,14 +1074,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
75737 goto out;
75738 }
75739
75740+ if (gr_handle_ptrace(child, request)) {
75741+ ret = -EPERM;
75742+ goto out_put_task_struct;
75743+ }
75744+
75745 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
75746 ret = ptrace_attach(child, request, addr, data);
75747 /*
75748 * Some architectures need to do book-keeping after
75749 * a ptrace attach.
75750 */
75751- if (!ret)
75752+ if (!ret) {
75753 arch_ptrace_attach(child);
75754+ gr_audit_ptrace(child);
75755+ }
75756 goto out_put_task_struct;
75757 }
75758
75759diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
75760index e7dce58..ad0d7b7 100644
75761--- a/kernel/rcutiny.c
75762+++ b/kernel/rcutiny.c
75763@@ -46,7 +46,7 @@
75764 struct rcu_ctrlblk;
75765 static void invoke_rcu_callbacks(void);
75766 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
75767-static void rcu_process_callbacks(struct softirq_action *unused);
75768+static void rcu_process_callbacks(void);
75769 static void __call_rcu(struct rcu_head *head,
75770 void (*func)(struct rcu_head *rcu),
75771 struct rcu_ctrlblk *rcp);
75772@@ -310,7 +310,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
75773 rcu_is_callbacks_kthread()));
75774 }
75775
75776-static void rcu_process_callbacks(struct softirq_action *unused)
75777+static void rcu_process_callbacks(void)
75778 {
75779 __rcu_process_callbacks(&rcu_sched_ctrlblk);
75780 __rcu_process_callbacks(&rcu_bh_ctrlblk);
75781diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
75782index f85016a..91cb03b 100644
75783--- a/kernel/rcutiny_plugin.h
75784+++ b/kernel/rcutiny_plugin.h
75785@@ -896,7 +896,7 @@ static int rcu_kthread(void *arg)
75786 have_rcu_kthread_work = morework;
75787 local_irq_restore(flags);
75788 if (work)
75789- rcu_process_callbacks(NULL);
75790+ rcu_process_callbacks();
75791 schedule_timeout_interruptible(1); /* Leave CPU for others. */
75792 }
75793
75794diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
75795index 31dea01..ad91ffb 100644
75796--- a/kernel/rcutorture.c
75797+++ b/kernel/rcutorture.c
75798@@ -163,12 +163,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
75799 { 0 };
75800 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
75801 { 0 };
75802-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
75803-static atomic_t n_rcu_torture_alloc;
75804-static atomic_t n_rcu_torture_alloc_fail;
75805-static atomic_t n_rcu_torture_free;
75806-static atomic_t n_rcu_torture_mberror;
75807-static atomic_t n_rcu_torture_error;
75808+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
75809+static atomic_unchecked_t n_rcu_torture_alloc;
75810+static atomic_unchecked_t n_rcu_torture_alloc_fail;
75811+static atomic_unchecked_t n_rcu_torture_free;
75812+static atomic_unchecked_t n_rcu_torture_mberror;
75813+static atomic_unchecked_t n_rcu_torture_error;
75814 static long n_rcu_torture_barrier_error;
75815 static long n_rcu_torture_boost_ktrerror;
75816 static long n_rcu_torture_boost_rterror;
75817@@ -272,11 +272,11 @@ rcu_torture_alloc(void)
75818
75819 spin_lock_bh(&rcu_torture_lock);
75820 if (list_empty(&rcu_torture_freelist)) {
75821- atomic_inc(&n_rcu_torture_alloc_fail);
75822+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
75823 spin_unlock_bh(&rcu_torture_lock);
75824 return NULL;
75825 }
75826- atomic_inc(&n_rcu_torture_alloc);
75827+ atomic_inc_unchecked(&n_rcu_torture_alloc);
75828 p = rcu_torture_freelist.next;
75829 list_del_init(p);
75830 spin_unlock_bh(&rcu_torture_lock);
75831@@ -289,7 +289,7 @@ rcu_torture_alloc(void)
75832 static void
75833 rcu_torture_free(struct rcu_torture *p)
75834 {
75835- atomic_inc(&n_rcu_torture_free);
75836+ atomic_inc_unchecked(&n_rcu_torture_free);
75837 spin_lock_bh(&rcu_torture_lock);
75838 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
75839 spin_unlock_bh(&rcu_torture_lock);
75840@@ -409,7 +409,7 @@ rcu_torture_cb(struct rcu_head *p)
75841 i = rp->rtort_pipe_count;
75842 if (i > RCU_TORTURE_PIPE_LEN)
75843 i = RCU_TORTURE_PIPE_LEN;
75844- atomic_inc(&rcu_torture_wcount[i]);
75845+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
75846 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
75847 rp->rtort_mbtest = 0;
75848 rcu_torture_free(rp);
75849@@ -457,7 +457,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
75850 i = rp->rtort_pipe_count;
75851 if (i > RCU_TORTURE_PIPE_LEN)
75852 i = RCU_TORTURE_PIPE_LEN;
75853- atomic_inc(&rcu_torture_wcount[i]);
75854+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
75855 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
75856 rp->rtort_mbtest = 0;
75857 list_del(&rp->rtort_free);
75858@@ -975,7 +975,7 @@ rcu_torture_writer(void *arg)
75859 i = old_rp->rtort_pipe_count;
75860 if (i > RCU_TORTURE_PIPE_LEN)
75861 i = RCU_TORTURE_PIPE_LEN;
75862- atomic_inc(&rcu_torture_wcount[i]);
75863+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
75864 old_rp->rtort_pipe_count++;
75865 cur_ops->deferred_free(old_rp);
75866 }
75867@@ -1060,7 +1060,7 @@ static void rcu_torture_timer(unsigned long unused)
75868 }
75869 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
75870 if (p->rtort_mbtest == 0)
75871- atomic_inc(&n_rcu_torture_mberror);
75872+ atomic_inc_unchecked(&n_rcu_torture_mberror);
75873 spin_lock(&rand_lock);
75874 cur_ops->read_delay(&rand);
75875 n_rcu_torture_timers++;
75876@@ -1124,7 +1124,7 @@ rcu_torture_reader(void *arg)
75877 }
75878 do_trace_rcu_torture_read(cur_ops->name, &p->rtort_rcu);
75879 if (p->rtort_mbtest == 0)
75880- atomic_inc(&n_rcu_torture_mberror);
75881+ atomic_inc_unchecked(&n_rcu_torture_mberror);
75882 cur_ops->read_delay(&rand);
75883 preempt_disable();
75884 pipe_count = p->rtort_pipe_count;
75885@@ -1183,11 +1183,11 @@ rcu_torture_printk(char *page)
75886 rcu_torture_current,
75887 rcu_torture_current_version,
75888 list_empty(&rcu_torture_freelist),
75889- atomic_read(&n_rcu_torture_alloc),
75890- atomic_read(&n_rcu_torture_alloc_fail),
75891- atomic_read(&n_rcu_torture_free));
75892+ atomic_read_unchecked(&n_rcu_torture_alloc),
75893+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
75894+ atomic_read_unchecked(&n_rcu_torture_free));
75895 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
75896- atomic_read(&n_rcu_torture_mberror),
75897+ atomic_read_unchecked(&n_rcu_torture_mberror),
75898 n_rcu_torture_boost_ktrerror,
75899 n_rcu_torture_boost_rterror);
75900 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
75901@@ -1206,14 +1206,14 @@ rcu_torture_printk(char *page)
75902 n_barrier_attempts,
75903 n_rcu_torture_barrier_error);
75904 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
75905- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
75906+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
75907 n_rcu_torture_barrier_error != 0 ||
75908 n_rcu_torture_boost_ktrerror != 0 ||
75909 n_rcu_torture_boost_rterror != 0 ||
75910 n_rcu_torture_boost_failure != 0 ||
75911 i > 1) {
75912 cnt += sprintf(&page[cnt], "!!! ");
75913- atomic_inc(&n_rcu_torture_error);
75914+ atomic_inc_unchecked(&n_rcu_torture_error);
75915 WARN_ON_ONCE(1);
75916 }
75917 cnt += sprintf(&page[cnt], "Reader Pipe: ");
75918@@ -1227,7 +1227,7 @@ rcu_torture_printk(char *page)
75919 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
75920 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
75921 cnt += sprintf(&page[cnt], " %d",
75922- atomic_read(&rcu_torture_wcount[i]));
75923+ atomic_read_unchecked(&rcu_torture_wcount[i]));
75924 }
75925 cnt += sprintf(&page[cnt], "\n");
75926 if (cur_ops->stats)
75927@@ -1920,7 +1920,7 @@ rcu_torture_cleanup(void)
75928
75929 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
75930
75931- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
75932+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
75933 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
75934 else if (n_online_successes != n_online_attempts ||
75935 n_offline_successes != n_offline_attempts)
75936@@ -1989,18 +1989,18 @@ rcu_torture_init(void)
75937
75938 rcu_torture_current = NULL;
75939 rcu_torture_current_version = 0;
75940- atomic_set(&n_rcu_torture_alloc, 0);
75941- atomic_set(&n_rcu_torture_alloc_fail, 0);
75942- atomic_set(&n_rcu_torture_free, 0);
75943- atomic_set(&n_rcu_torture_mberror, 0);
75944- atomic_set(&n_rcu_torture_error, 0);
75945+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
75946+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
75947+ atomic_set_unchecked(&n_rcu_torture_free, 0);
75948+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
75949+ atomic_set_unchecked(&n_rcu_torture_error, 0);
75950 n_rcu_torture_barrier_error = 0;
75951 n_rcu_torture_boost_ktrerror = 0;
75952 n_rcu_torture_boost_rterror = 0;
75953 n_rcu_torture_boost_failure = 0;
75954 n_rcu_torture_boosts = 0;
75955 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
75956- atomic_set(&rcu_torture_wcount[i], 0);
75957+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
75958 for_each_possible_cpu(cpu) {
75959 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
75960 per_cpu(rcu_torture_count, cpu)[i] = 0;
75961diff --git a/kernel/rcutree.c b/kernel/rcutree.c
75962index e441b77..dd54f17 100644
75963--- a/kernel/rcutree.c
75964+++ b/kernel/rcutree.c
75965@@ -349,9 +349,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
75966 rcu_prepare_for_idle(smp_processor_id());
75967 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
75968 smp_mb__before_atomic_inc(); /* See above. */
75969- atomic_inc(&rdtp->dynticks);
75970+ atomic_inc_unchecked(&rdtp->dynticks);
75971 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
75972- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
75973+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
75974
75975 /*
75976 * It is illegal to enter an extended quiescent state while
75977@@ -487,10 +487,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
75978 int user)
75979 {
75980 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
75981- atomic_inc(&rdtp->dynticks);
75982+ atomic_inc_unchecked(&rdtp->dynticks);
75983 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
75984 smp_mb__after_atomic_inc(); /* See above. */
75985- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
75986+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
75987 rcu_cleanup_after_idle(smp_processor_id());
75988 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
75989 if (!user && !is_idle_task(current)) {
75990@@ -629,14 +629,14 @@ void rcu_nmi_enter(void)
75991 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
75992
75993 if (rdtp->dynticks_nmi_nesting == 0 &&
75994- (atomic_read(&rdtp->dynticks) & 0x1))
75995+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
75996 return;
75997 rdtp->dynticks_nmi_nesting++;
75998 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
75999- atomic_inc(&rdtp->dynticks);
76000+ atomic_inc_unchecked(&rdtp->dynticks);
76001 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
76002 smp_mb__after_atomic_inc(); /* See above. */
76003- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
76004+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
76005 }
76006
76007 /**
76008@@ -655,9 +655,9 @@ void rcu_nmi_exit(void)
76009 return;
76010 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
76011 smp_mb__before_atomic_inc(); /* See above. */
76012- atomic_inc(&rdtp->dynticks);
76013+ atomic_inc_unchecked(&rdtp->dynticks);
76014 smp_mb__after_atomic_inc(); /* Force delay to next write. */
76015- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
76016+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
76017 }
76018
76019 /**
76020@@ -671,7 +671,7 @@ int rcu_is_cpu_idle(void)
76021 int ret;
76022
76023 preempt_disable();
76024- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
76025+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
76026 preempt_enable();
76027 return ret;
76028 }
76029@@ -739,7 +739,7 @@ int rcu_is_cpu_rrupt_from_idle(void)
76030 */
76031 static int dyntick_save_progress_counter(struct rcu_data *rdp)
76032 {
76033- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
76034+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
76035 return (rdp->dynticks_snap & 0x1) == 0;
76036 }
76037
76038@@ -754,7 +754,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
76039 unsigned int curr;
76040 unsigned int snap;
76041
76042- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
76043+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
76044 snap = (unsigned int)rdp->dynticks_snap;
76045
76046 /*
76047@@ -802,10 +802,10 @@ static int jiffies_till_stall_check(void)
76048 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
76049 */
76050 if (till_stall_check < 3) {
76051- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
76052+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
76053 till_stall_check = 3;
76054 } else if (till_stall_check > 300) {
76055- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
76056+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
76057 till_stall_check = 300;
76058 }
76059 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
76060@@ -1592,7 +1592,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
76061 rsp->qlen += rdp->qlen;
76062 rdp->n_cbs_orphaned += rdp->qlen;
76063 rdp->qlen_lazy = 0;
76064- ACCESS_ONCE(rdp->qlen) = 0;
76065+ ACCESS_ONCE_RW(rdp->qlen) = 0;
76066 }
76067
76068 /*
76069@@ -1838,7 +1838,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
76070 }
76071 smp_mb(); /* List handling before counting for rcu_barrier(). */
76072 rdp->qlen_lazy -= count_lazy;
76073- ACCESS_ONCE(rdp->qlen) -= count;
76074+ ACCESS_ONCE_RW(rdp->qlen) -= count;
76075 rdp->n_cbs_invoked += count;
76076
76077 /* Reinstate batch limit if we have worked down the excess. */
76078@@ -2031,7 +2031,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
76079 /*
76080 * Do RCU core processing for the current CPU.
76081 */
76082-static void rcu_process_callbacks(struct softirq_action *unused)
76083+static void rcu_process_callbacks(void)
76084 {
76085 struct rcu_state *rsp;
76086
76087@@ -2154,7 +2154,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
76088 local_irq_restore(flags);
76089 return;
76090 }
76091- ACCESS_ONCE(rdp->qlen)++;
76092+ ACCESS_ONCE_RW(rdp->qlen)++;
76093 if (lazy)
76094 rdp->qlen_lazy++;
76095 else
76096@@ -2363,11 +2363,11 @@ void synchronize_sched_expedited(void)
76097 * counter wrap on a 32-bit system. Quite a few more CPUs would of
76098 * course be required on a 64-bit system.
76099 */
76100- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
76101+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
76102 (ulong)atomic_long_read(&rsp->expedited_done) +
76103 ULONG_MAX / 8)) {
76104 synchronize_sched();
76105- atomic_long_inc(&rsp->expedited_wrap);
76106+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
76107 return;
76108 }
76109
76110@@ -2375,7 +2375,7 @@ void synchronize_sched_expedited(void)
76111 * Take a ticket. Note that atomic_inc_return() implies a
76112 * full memory barrier.
76113 */
76114- snap = atomic_long_inc_return(&rsp->expedited_start);
76115+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
76116 firstsnap = snap;
76117 get_online_cpus();
76118 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
76119@@ -2388,14 +2388,14 @@ void synchronize_sched_expedited(void)
76120 synchronize_sched_expedited_cpu_stop,
76121 NULL) == -EAGAIN) {
76122 put_online_cpus();
76123- atomic_long_inc(&rsp->expedited_tryfail);
76124+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
76125
76126 /* Check to see if someone else did our work for us. */
76127 s = atomic_long_read(&rsp->expedited_done);
76128 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
76129 /* ensure test happens before caller kfree */
76130 smp_mb__before_atomic_inc(); /* ^^^ */
76131- atomic_long_inc(&rsp->expedited_workdone1);
76132+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
76133 return;
76134 }
76135
76136@@ -2404,7 +2404,7 @@ void synchronize_sched_expedited(void)
76137 udelay(trycount * num_online_cpus());
76138 } else {
76139 wait_rcu_gp(call_rcu_sched);
76140- atomic_long_inc(&rsp->expedited_normal);
76141+ atomic_long_inc_unchecked(&rsp->expedited_normal);
76142 return;
76143 }
76144
76145@@ -2413,7 +2413,7 @@ void synchronize_sched_expedited(void)
76146 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
76147 /* ensure test happens before caller kfree */
76148 smp_mb__before_atomic_inc(); /* ^^^ */
76149- atomic_long_inc(&rsp->expedited_workdone2);
76150+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
76151 return;
76152 }
76153
76154@@ -2425,10 +2425,10 @@ void synchronize_sched_expedited(void)
76155 * period works for us.
76156 */
76157 get_online_cpus();
76158- snap = atomic_long_read(&rsp->expedited_start);
76159+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
76160 smp_mb(); /* ensure read is before try_stop_cpus(). */
76161 }
76162- atomic_long_inc(&rsp->expedited_stoppedcpus);
76163+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
76164
76165 /*
76166 * Everyone up to our most recent fetch is covered by our grace
76167@@ -2437,16 +2437,16 @@ void synchronize_sched_expedited(void)
76168 * than we did already did their update.
76169 */
76170 do {
76171- atomic_long_inc(&rsp->expedited_done_tries);
76172+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
76173 s = atomic_long_read(&rsp->expedited_done);
76174 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
76175 /* ensure test happens before caller kfree */
76176 smp_mb__before_atomic_inc(); /* ^^^ */
76177- atomic_long_inc(&rsp->expedited_done_lost);
76178+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
76179 break;
76180 }
76181 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
76182- atomic_long_inc(&rsp->expedited_done_exit);
76183+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
76184
76185 put_online_cpus();
76186 }
76187@@ -2620,7 +2620,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
76188 * ACCESS_ONCE() to prevent the compiler from speculating
76189 * the increment to precede the early-exit check.
76190 */
76191- ACCESS_ONCE(rsp->n_barrier_done)++;
76192+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
76193 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
76194 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
76195 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
76196@@ -2670,7 +2670,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
76197
76198 /* Increment ->n_barrier_done to prevent duplicate work. */
76199 smp_mb(); /* Keep increment after above mechanism. */
76200- ACCESS_ONCE(rsp->n_barrier_done)++;
76201+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
76202 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
76203 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
76204 smp_mb(); /* Keep increment before caller's subsequent code. */
76205@@ -2715,10 +2715,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
76206 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
76207 init_callback_list(rdp);
76208 rdp->qlen_lazy = 0;
76209- ACCESS_ONCE(rdp->qlen) = 0;
76210+ ACCESS_ONCE_RW(rdp->qlen) = 0;
76211 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
76212 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
76213- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
76214+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
76215 #ifdef CONFIG_RCU_USER_QS
76216 WARN_ON_ONCE(rdp->dynticks->in_user);
76217 #endif
76218@@ -2754,8 +2754,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
76219 rdp->blimit = blimit;
76220 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
76221 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
76222- atomic_set(&rdp->dynticks->dynticks,
76223- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
76224+ atomic_set_unchecked(&rdp->dynticks->dynticks,
76225+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
76226 rcu_prepare_for_idle_init(cpu);
76227 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
76228
76229diff --git a/kernel/rcutree.h b/kernel/rcutree.h
76230index 4b69291..704c92e 100644
76231--- a/kernel/rcutree.h
76232+++ b/kernel/rcutree.h
76233@@ -86,7 +86,7 @@ struct rcu_dynticks {
76234 long long dynticks_nesting; /* Track irq/process nesting level. */
76235 /* Process level is worth LLONG_MAX/2. */
76236 int dynticks_nmi_nesting; /* Track NMI nesting level. */
76237- atomic_t dynticks; /* Even value for idle, else odd. */
76238+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
76239 #ifdef CONFIG_RCU_FAST_NO_HZ
76240 int dyntick_drain; /* Prepare-for-idle state variable. */
76241 unsigned long dyntick_holdoff;
76242@@ -423,17 +423,17 @@ struct rcu_state {
76243 /* _rcu_barrier(). */
76244 /* End of fields guarded by barrier_mutex. */
76245
76246- atomic_long_t expedited_start; /* Starting ticket. */
76247- atomic_long_t expedited_done; /* Done ticket. */
76248- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
76249- atomic_long_t expedited_tryfail; /* # acquisition failures. */
76250- atomic_long_t expedited_workdone1; /* # done by others #1. */
76251- atomic_long_t expedited_workdone2; /* # done by others #2. */
76252- atomic_long_t expedited_normal; /* # fallbacks to normal. */
76253- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
76254- atomic_long_t expedited_done_tries; /* # tries to update _done. */
76255- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
76256- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
76257+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
76258+ atomic_long_t expedited_done; /* Done ticket. */
76259+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
76260+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
76261+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
76262+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
76263+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
76264+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
76265+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
76266+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
76267+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
76268
76269 unsigned long jiffies_force_qs; /* Time at which to invoke */
76270 /* force_quiescent_state(). */
76271diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
76272index c1cc7e1..f62e436 100644
76273--- a/kernel/rcutree_plugin.h
76274+++ b/kernel/rcutree_plugin.h
76275@@ -892,7 +892,7 @@ void synchronize_rcu_expedited(void)
76276
76277 /* Clean up and exit. */
76278 smp_mb(); /* ensure expedited GP seen before counter increment. */
76279- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
76280+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
76281 unlock_mb_ret:
76282 mutex_unlock(&sync_rcu_preempt_exp_mutex);
76283 mb_ret:
76284@@ -1440,7 +1440,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
76285 free_cpumask_var(cm);
76286 }
76287
76288-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
76289+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
76290 .store = &rcu_cpu_kthread_task,
76291 .thread_should_run = rcu_cpu_kthread_should_run,
76292 .thread_fn = rcu_cpu_kthread,
76293@@ -2072,7 +2072,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
76294 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
76295 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d %s\n",
76296 cpu, ticks_value, ticks_title,
76297- atomic_read(&rdtp->dynticks) & 0xfff,
76298+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
76299 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
76300 fast_no_hz);
76301 }
76302@@ -2192,7 +2192,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
76303
76304 /* Enqueue the callback on the nocb list and update counts. */
76305 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
76306- ACCESS_ONCE(*old_rhpp) = rhp;
76307+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
76308 atomic_long_add(rhcount, &rdp->nocb_q_count);
76309 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
76310
76311@@ -2384,12 +2384,12 @@ static int rcu_nocb_kthread(void *arg)
76312 * Extract queued callbacks, update counts, and wait
76313 * for a grace period to elapse.
76314 */
76315- ACCESS_ONCE(rdp->nocb_head) = NULL;
76316+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
76317 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
76318 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
76319 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
76320- ACCESS_ONCE(rdp->nocb_p_count) += c;
76321- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
76322+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
76323+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
76324 wait_rcu_gp(rdp->rsp->call_remote);
76325
76326 /* Each pass through the following loop invokes a callback. */
76327@@ -2411,8 +2411,8 @@ static int rcu_nocb_kthread(void *arg)
76328 list = next;
76329 }
76330 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
76331- ACCESS_ONCE(rdp->nocb_p_count) -= c;
76332- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
76333+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
76334+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
76335 rdp->n_nocbs_invoked += c;
76336 }
76337 return 0;
76338@@ -2438,7 +2438,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
76339 rdp = per_cpu_ptr(rsp->rda, cpu);
76340 t = kthread_run(rcu_nocb_kthread, rdp, "rcuo%d", cpu);
76341 BUG_ON(IS_ERR(t));
76342- ACCESS_ONCE(rdp->nocb_kthread) = t;
76343+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
76344 }
76345 }
76346
76347diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
76348index 0d095dc..1985b19 100644
76349--- a/kernel/rcutree_trace.c
76350+++ b/kernel/rcutree_trace.c
76351@@ -123,7 +123,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
76352 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
76353 rdp->passed_quiesce, rdp->qs_pending);
76354 seq_printf(m, " dt=%d/%llx/%d df=%lu",
76355- atomic_read(&rdp->dynticks->dynticks),
76356+ atomic_read_unchecked(&rdp->dynticks->dynticks),
76357 rdp->dynticks->dynticks_nesting,
76358 rdp->dynticks->dynticks_nmi_nesting,
76359 rdp->dynticks_fqs);
76360@@ -184,17 +184,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
76361 struct rcu_state *rsp = (struct rcu_state *)m->private;
76362
76363 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
76364- atomic_long_read(&rsp->expedited_start),
76365+ atomic_long_read_unchecked(&rsp->expedited_start),
76366 atomic_long_read(&rsp->expedited_done),
76367- atomic_long_read(&rsp->expedited_wrap),
76368- atomic_long_read(&rsp->expedited_tryfail),
76369- atomic_long_read(&rsp->expedited_workdone1),
76370- atomic_long_read(&rsp->expedited_workdone2),
76371- atomic_long_read(&rsp->expedited_normal),
76372- atomic_long_read(&rsp->expedited_stoppedcpus),
76373- atomic_long_read(&rsp->expedited_done_tries),
76374- atomic_long_read(&rsp->expedited_done_lost),
76375- atomic_long_read(&rsp->expedited_done_exit));
76376+ atomic_long_read_unchecked(&rsp->expedited_wrap),
76377+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
76378+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
76379+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
76380+ atomic_long_read_unchecked(&rsp->expedited_normal),
76381+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
76382+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
76383+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
76384+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
76385 return 0;
76386 }
76387
76388diff --git a/kernel/resource.c b/kernel/resource.c
76389index 73f35d4..4684fc4 100644
76390--- a/kernel/resource.c
76391+++ b/kernel/resource.c
76392@@ -143,8 +143,18 @@ static const struct file_operations proc_iomem_operations = {
76393
76394 static int __init ioresources_init(void)
76395 {
76396+#ifdef CONFIG_GRKERNSEC_PROC_ADD
76397+#ifdef CONFIG_GRKERNSEC_PROC_USER
76398+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
76399+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
76400+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
76401+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
76402+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
76403+#endif
76404+#else
76405 proc_create("ioports", 0, NULL, &proc_ioports_operations);
76406 proc_create("iomem", 0, NULL, &proc_iomem_operations);
76407+#endif
76408 return 0;
76409 }
76410 __initcall(ioresources_init);
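
ioresources_init now picks the /proc entry mode at build time, the same scheme proc_modules_init used earlier: root-only under GRKERNSEC_PROC_USER, root plus one trusted group under GRKERNSEC_PROC_USERGROUP, and otherwise the stock behaviour, where a mode of 0 makes proc_create fall back to its world-readable default. The selection logic as a compile-time sketch (the macro names mirror the Kconfig symbols but are defined here only for illustration):

#include <stdio.h>
#include <sys/stat.h>

#define GRKERNSEC_PROC_USERGROUP 1       /* pretend this option is enabled */

static mode_t proc_entry_mode(void)
{
#if defined(GRKERNSEC_PROC_USER)
    return S_IRUSR;                      /* readable by root only */
#elif defined(GRKERNSEC_PROC_USERGROUP)
    return S_IRUSR | S_IRGRP;            /* root plus one trusted group */
#else
    return S_IRUSR | S_IRGRP | S_IROTH;  /* world-readable */
#endif
}

int main(void)
{
    printf("mode=%o\n", (unsigned)proc_entry_mode());
    return 0;
}
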
76411diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
76412index 98ec494..4241d6d 100644
76413--- a/kernel/rtmutex-tester.c
76414+++ b/kernel/rtmutex-tester.c
76415@@ -20,7 +20,7 @@
76416 #define MAX_RT_TEST_MUTEXES 8
76417
76418 static spinlock_t rttest_lock;
76419-static atomic_t rttest_event;
76420+static atomic_unchecked_t rttest_event;
76421
76422 struct test_thread_data {
76423 int opcode;
76424@@ -61,7 +61,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
76425
76426 case RTTEST_LOCKCONT:
76427 td->mutexes[td->opdata] = 1;
76428- td->event = atomic_add_return(1, &rttest_event);
76429+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76430 return 0;
76431
76432 case RTTEST_RESET:
76433@@ -74,7 +74,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
76434 return 0;
76435
76436 case RTTEST_RESETEVENT:
76437- atomic_set(&rttest_event, 0);
76438+ atomic_set_unchecked(&rttest_event, 0);
76439 return 0;
76440
76441 default:
76442@@ -91,9 +91,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
76443 return ret;
76444
76445 td->mutexes[id] = 1;
76446- td->event = atomic_add_return(1, &rttest_event);
76447+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76448 rt_mutex_lock(&mutexes[id]);
76449- td->event = atomic_add_return(1, &rttest_event);
76450+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76451 td->mutexes[id] = 4;
76452 return 0;
76453
76454@@ -104,9 +104,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
76455 return ret;
76456
76457 td->mutexes[id] = 1;
76458- td->event = atomic_add_return(1, &rttest_event);
76459+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76460 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
76461- td->event = atomic_add_return(1, &rttest_event);
76462+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76463 td->mutexes[id] = ret ? 0 : 4;
76464 return ret ? -EINTR : 0;
76465
76466@@ -115,9 +115,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
76467 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
76468 return ret;
76469
76470- td->event = atomic_add_return(1, &rttest_event);
76471+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76472 rt_mutex_unlock(&mutexes[id]);
76473- td->event = atomic_add_return(1, &rttest_event);
76474+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76475 td->mutexes[id] = 0;
76476 return 0;
76477
76478@@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
76479 break;
76480
76481 td->mutexes[dat] = 2;
76482- td->event = atomic_add_return(1, &rttest_event);
76483+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76484 break;
76485
76486 default:
76487@@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
76488 return;
76489
76490 td->mutexes[dat] = 3;
76491- td->event = atomic_add_return(1, &rttest_event);
76492+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76493 break;
76494
76495 case RTTEST_LOCKNOWAIT:
76496@@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
76497 return;
76498
76499 td->mutexes[dat] = 1;
76500- td->event = atomic_add_return(1, &rttest_event);
76501+ td->event = atomic_add_return_unchecked(1, &rttest_event);
76502 return;
76503
76504 default:
76505diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
76506index 0984a21..939f183 100644
76507--- a/kernel/sched/auto_group.c
76508+++ b/kernel/sched/auto_group.c
76509@@ -11,7 +11,7 @@
76510
76511 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
76512 static struct autogroup autogroup_default;
76513-static atomic_t autogroup_seq_nr;
76514+static atomic_unchecked_t autogroup_seq_nr;
76515
76516 void __init autogroup_init(struct task_struct *init_task)
76517 {
76518@@ -78,7 +78,7 @@ static inline struct autogroup *autogroup_create(void)
76519
76520 kref_init(&ag->kref);
76521 init_rwsem(&ag->lock);
76522- ag->id = atomic_inc_return(&autogroup_seq_nr);
76523+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
76524 ag->tg = tg;
76525 #ifdef CONFIG_RT_GROUP_SCHED
76526 /*
76527diff --git a/kernel/sched/core.c b/kernel/sched/core.c
76528index 26058d0..f9d3c76 100644
76529--- a/kernel/sched/core.c
76530+++ b/kernel/sched/core.c
76531@@ -3631,6 +3631,8 @@ int can_nice(const struct task_struct *p, const int nice)
76532 /* convert nice value [19,-20] to rlimit style value [1,40] */
76533 int nice_rlim = 20 - nice;
76534
76535+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
76536+
76537 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
76538 capable(CAP_SYS_NICE));
76539 }
76540@@ -3664,7 +3666,8 @@ SYSCALL_DEFINE1(nice, int, increment)
76541 if (nice > 19)
76542 nice = 19;
76543
76544- if (increment < 0 && !can_nice(current, nice))
76545+ if (increment < 0 && (!can_nice(current, nice) ||
76546+ gr_handle_chroot_nice()))
76547 return -EPERM;
76548
76549 retval = security_task_setnice(current, nice);
76550@@ -3818,6 +3821,7 @@ recheck:
76551 unsigned long rlim_rtprio =
76552 task_rlimit(p, RLIMIT_RTPRIO);
76553
76554+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
76555 /* can't set/change the rt policy */
76556 if (policy != p->policy && !rlim_rtprio)
76557 return -EPERM;
76558@@ -4901,7 +4905,7 @@ static void migrate_tasks(unsigned int dead_cpu)
76559
76560 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
76561
76562-static struct ctl_table sd_ctl_dir[] = {
76563+static ctl_table_no_const sd_ctl_dir[] __read_only = {
76564 {
76565 .procname = "sched_domain",
76566 .mode = 0555,
76567@@ -4918,17 +4922,17 @@ static struct ctl_table sd_ctl_root[] = {
76568 {}
76569 };
76570
76571-static struct ctl_table *sd_alloc_ctl_entry(int n)
76572+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
76573 {
76574- struct ctl_table *entry =
76575+ ctl_table_no_const *entry =
76576 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
76577
76578 return entry;
76579 }
76580
76581-static void sd_free_ctl_entry(struct ctl_table **tablep)
76582+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
76583 {
76584- struct ctl_table *entry;
76585+ ctl_table_no_const *entry;
76586
76587 /*
76588 * In the intermediate directories, both the child directory and
76589@@ -4936,22 +4940,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
76590 * will always be set. In the lowest directory the names are
76591 * static strings and all have proc handlers.
76592 */
76593- for (entry = *tablep; entry->mode; entry++) {
76594- if (entry->child)
76595- sd_free_ctl_entry(&entry->child);
76596+ for (entry = tablep; entry->mode; entry++) {
76597+ if (entry->child) {
76598+ sd_free_ctl_entry(entry->child);
76599+ pax_open_kernel();
76600+ entry->child = NULL;
76601+ pax_close_kernel();
76602+ }
76603 if (entry->proc_handler == NULL)
76604 kfree(entry->procname);
76605 }
76606
76607- kfree(*tablep);
76608- *tablep = NULL;
76609+ kfree(tablep);
76610 }
76611
76612 static int min_load_idx = 0;
76613 static int max_load_idx = CPU_LOAD_IDX_MAX;
76614
76615 static void
76616-set_table_entry(struct ctl_table *entry,
76617+set_table_entry(ctl_table_no_const *entry,
76618 const char *procname, void *data, int maxlen,
76619 umode_t mode, proc_handler *proc_handler,
76620 bool load_idx)
76621@@ -4971,7 +4978,7 @@ set_table_entry(struct ctl_table *entry,
76622 static struct ctl_table *
76623 sd_alloc_ctl_domain_table(struct sched_domain *sd)
76624 {
76625- struct ctl_table *table = sd_alloc_ctl_entry(13);
76626+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
76627
76628 if (table == NULL)
76629 return NULL;
76630@@ -5006,9 +5013,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
76631 return table;
76632 }
76633
76634-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
76635+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
76636 {
76637- struct ctl_table *entry, *table;
76638+ ctl_table_no_const *entry, *table;
76639 struct sched_domain *sd;
76640 int domain_num = 0, i;
76641 char buf[32];
76642@@ -5035,11 +5042,13 @@ static struct ctl_table_header *sd_sysctl_header;
76643 static void register_sched_domain_sysctl(void)
76644 {
76645 int i, cpu_num = num_possible_cpus();
76646- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
76647+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
76648 char buf[32];
76649
76650 WARN_ON(sd_ctl_dir[0].child);
76651+ pax_open_kernel();
76652 sd_ctl_dir[0].child = entry;
76653+ pax_close_kernel();
76654
76655 if (entry == NULL)
76656 return;
76657@@ -5062,8 +5071,12 @@ static void unregister_sched_domain_sysctl(void)
76658 if (sd_sysctl_header)
76659 unregister_sysctl_table(sd_sysctl_header);
76660 sd_sysctl_header = NULL;
76661- if (sd_ctl_dir[0].child)
76662- sd_free_ctl_entry(&sd_ctl_dir[0].child);
76663+ if (sd_ctl_dir[0].child) {
76664+ sd_free_ctl_entry(sd_ctl_dir[0].child);
76665+ pax_open_kernel();
76666+ sd_ctl_dir[0].child = NULL;
76667+ pax_close_kernel();
76668+ }
76669 }
76670 #else
76671 static void register_sched_domain_sysctl(void)
76672@@ -5162,7 +5175,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
76673 * happens before everything else. This has to be lower priority than
76674 * the notifier in the perf_event subsystem, though.
76675 */
76676-static struct notifier_block __cpuinitdata migration_notifier = {
76677+static struct notifier_block migration_notifier = {
76678 .notifier_call = migration_call,
76679 .priority = CPU_PRI_MIGRATION,
76680 };
76681diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
76682index 81fa536..6ccf96a 100644
76683--- a/kernel/sched/fair.c
76684+++ b/kernel/sched/fair.c
76685@@ -830,7 +830,7 @@ void task_numa_fault(int node, int pages, bool migrated)
76686
76687 static void reset_ptenuma_scan(struct task_struct *p)
76688 {
76689- ACCESS_ONCE(p->mm->numa_scan_seq)++;
76690+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
76691 p->mm->numa_scan_offset = 0;
76692 }
76693
76694@@ -3254,25 +3254,18 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
76695 */
76696 static int select_idle_sibling(struct task_struct *p, int target)
76697 {
76698- int cpu = smp_processor_id();
76699- int prev_cpu = task_cpu(p);
76700 struct sched_domain *sd;
76701 struct sched_group *sg;
76702- int i;
76703+ int i = task_cpu(p);
76704
76705- /*
76706- * If the task is going to be woken-up on this cpu and if it is
76707- * already idle, then it is the right target.
76708- */
76709- if (target == cpu && idle_cpu(cpu))
76710- return cpu;
76711+ if (idle_cpu(target))
76712+ return target;
76713
76714 /*
76715- * If the task is going to be woken-up on the cpu where it previously
76716- * ran and if it is currently idle, then it the right target.
76717+ * If the previous cpu is cache affine and idle, don't be stupid.
76718 */
76719- if (target == prev_cpu && idle_cpu(prev_cpu))
76720- return prev_cpu;
76721+ if (i != target && cpus_share_cache(i, target) && idle_cpu(i))
76722+ return i;
76723
76724 /*
76725 * Otherwise, iterate the domains and find an elegible idle cpu.
76726@@ -3286,7 +3279,7 @@ static int select_idle_sibling(struct task_struct *p, int target)
76727 goto next;
76728
76729 for_each_cpu(i, sched_group_cpus(sg)) {
76730- if (!idle_cpu(i))
76731+ if (i == target || !idle_cpu(i))
76732 goto next;
76733 }
76734
76735@@ -5663,7 +5656,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
76736 * run_rebalance_domains is triggered when needed from the scheduler tick.
76737 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
76738 */
76739-static void run_rebalance_domains(struct softirq_action *h)
76740+static void run_rebalance_domains(void)
76741 {
76742 int this_cpu = smp_processor_id();
76743 struct rq *this_rq = cpu_rq(this_cpu);
76744diff --git a/kernel/signal.c b/kernel/signal.c
76745index 3d09cf6..a67d2c6 100644
76746--- a/kernel/signal.c
76747+++ b/kernel/signal.c
76748@@ -50,12 +50,12 @@ static struct kmem_cache *sigqueue_cachep;
76749
76750 int print_fatal_signals __read_mostly;
76751
76752-static void __user *sig_handler(struct task_struct *t, int sig)
76753+static __sighandler_t sig_handler(struct task_struct *t, int sig)
76754 {
76755 return t->sighand->action[sig - 1].sa.sa_handler;
76756 }
76757
76758-static int sig_handler_ignored(void __user *handler, int sig)
76759+static int sig_handler_ignored(__sighandler_t handler, int sig)
76760 {
76761 /* Is it explicitly or implicitly ignored? */
76762 return handler == SIG_IGN ||
76763@@ -64,7 +64,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
76764
76765 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
76766 {
76767- void __user *handler;
76768+ __sighandler_t handler;
76769
76770 handler = sig_handler(t, sig);
76771
76772@@ -368,6 +368,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
76773 atomic_inc(&user->sigpending);
76774 rcu_read_unlock();
76775
76776+ if (!override_rlimit)
76777+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
76778+
76779 if (override_rlimit ||
76780 atomic_read(&user->sigpending) <=
76781 task_rlimit(t, RLIMIT_SIGPENDING)) {
76782@@ -492,7 +495,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
76783
76784 int unhandled_signal(struct task_struct *tsk, int sig)
76785 {
76786- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
76787+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
76788 if (is_global_init(tsk))
76789 return 1;
76790 if (handler != SIG_IGN && handler != SIG_DFL)
76791@@ -812,6 +815,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
76792 }
76793 }
76794
76795+ /* allow glibc communication via tgkill to other threads in our
76796+ thread group */
76797+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
76798+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
76799+ && gr_handle_signal(t, sig))
76800+ return -EPERM;
76801+
76802 return security_task_kill(t, info, sig, 0);
76803 }
76804
76805@@ -1194,7 +1204,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
76806 return send_signal(sig, info, p, 1);
76807 }
76808
76809-static int
76810+int
76811 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
76812 {
76813 return send_signal(sig, info, t, 0);
76814@@ -1231,6 +1241,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
76815 unsigned long int flags;
76816 int ret, blocked, ignored;
76817 struct k_sigaction *action;
76818+ int is_unhandled = 0;
76819
76820 spin_lock_irqsave(&t->sighand->siglock, flags);
76821 action = &t->sighand->action[sig-1];
76822@@ -1245,9 +1256,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
76823 }
76824 if (action->sa.sa_handler == SIG_DFL)
76825 t->signal->flags &= ~SIGNAL_UNKILLABLE;
76826+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
76827+ is_unhandled = 1;
76828 ret = specific_send_sig_info(sig, info, t);
76829 spin_unlock_irqrestore(&t->sighand->siglock, flags);
76830
76831+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
76832+ normal operation */
76833+ if (is_unhandled) {
76834+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
76835+ gr_handle_crash(t, sig);
76836+ }
76837+
76838 return ret;
76839 }
76840
76841@@ -1314,8 +1334,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
76842 ret = check_kill_permission(sig, info, p);
76843 rcu_read_unlock();
76844
76845- if (!ret && sig)
76846+ if (!ret && sig) {
76847 ret = do_send_sig_info(sig, info, p, true);
76848+ if (!ret)
76849+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
76850+ }
76851
76852 return ret;
76853 }
76854@@ -2852,7 +2875,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
76855 int error = -ESRCH;
76856
76857 rcu_read_lock();
76858- p = find_task_by_vpid(pid);
76859+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
76860+ /* allow glibc communication via tgkill to other threads in our
76861+ thread group */
76862+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
76863+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
76864+ p = find_task_by_vpid_unrestricted(pid);
76865+ else
76866+#endif
76867+ p = find_task_by_vpid(pid);
76868 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
76869 error = check_kill_permission(sig, info, p);
76870 /*
76871@@ -3135,8 +3166,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
76872 }
76873 seg = get_fs();
76874 set_fs(KERNEL_DS);
76875- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
76876- (stack_t __force __user *) &uoss,
76877+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
76878+ (stack_t __force_user *) &uoss,
76879 compat_user_stack_pointer());
76880 set_fs(seg);
76881 if (ret >= 0 && uoss_ptr) {
76882diff --git a/kernel/smp.c b/kernel/smp.c
76883index 69f38bd..77bbf12 100644
76884--- a/kernel/smp.c
76885+++ b/kernel/smp.c
76886@@ -77,7 +77,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
76887 return NOTIFY_OK;
76888 }
76889
76890-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
76891+static struct notifier_block hotplug_cfd_notifier = {
76892 .notifier_call = hotplug_cfd,
76893 };
76894
76895diff --git a/kernel/smpboot.c b/kernel/smpboot.c
76896index d6c5fc0..530560c 100644
76897--- a/kernel/smpboot.c
76898+++ b/kernel/smpboot.c
76899@@ -275,7 +275,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
76900 }
76901 smpboot_unpark_thread(plug_thread, cpu);
76902 }
76903- list_add(&plug_thread->list, &hotplug_threads);
76904+ pax_list_add(&plug_thread->list, &hotplug_threads);
76905 out:
76906 mutex_unlock(&smpboot_threads_lock);
76907 return ret;
76908@@ -292,7 +292,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
76909 {
76910 get_online_cpus();
76911 mutex_lock(&smpboot_threads_lock);
76912- list_del(&plug_thread->list);
76913+ pax_list_del(&plug_thread->list);
76914 smpboot_destroy_threads(plug_thread);
76915 mutex_unlock(&smpboot_threads_lock);
76916 put_online_cpus();
76917diff --git a/kernel/softirq.c b/kernel/softirq.c
76918index ed567ba..e71dabf 100644
76919--- a/kernel/softirq.c
76920+++ b/kernel/softirq.c
76921@@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
76922 EXPORT_SYMBOL(irq_stat);
76923 #endif
76924
76925-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
76926+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
76927
76928 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
76929
76930-char *softirq_to_name[NR_SOFTIRQS] = {
76931+const char * const softirq_to_name[NR_SOFTIRQS] = {
76932 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
76933 "TASKLET", "SCHED", "HRTIMER", "RCU"
76934 };
76935@@ -244,7 +244,7 @@ restart:
76936 kstat_incr_softirqs_this_cpu(vec_nr);
76937
76938 trace_softirq_entry(vec_nr);
76939- h->action(h);
76940+ h->action();
76941 trace_softirq_exit(vec_nr);
76942 if (unlikely(prev_count != preempt_count())) {
76943 printk(KERN_ERR "huh, entered softirq %u %s %p"
76944@@ -391,7 +391,7 @@ void __raise_softirq_irqoff(unsigned int nr)
76945 or_softirq_pending(1UL << nr);
76946 }
76947
76948-void open_softirq(int nr, void (*action)(struct softirq_action *))
76949+void __init open_softirq(int nr, void (*action)(void))
76950 {
76951 softirq_vec[nr].action = action;
76952 }
76953@@ -447,7 +447,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
76954
76955 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
76956
76957-static void tasklet_action(struct softirq_action *a)
76958+static void tasklet_action(void)
76959 {
76960 struct tasklet_struct *list;
76961
76962@@ -482,7 +482,7 @@ static void tasklet_action(struct softirq_action *a)
76963 }
76964 }
76965
76966-static void tasklet_hi_action(struct softirq_action *a)
76967+static void tasklet_hi_action(void)
76968 {
76969 struct tasklet_struct *list;
76970
76971@@ -718,7 +718,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
76972 return NOTIFY_OK;
76973 }
76974
76975-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
76976+static struct notifier_block remote_softirq_cpu_notifier = {
76977 .notifier_call = remote_softirq_cpu_notify,
76978 };
76979
76980@@ -835,11 +835,11 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
76981 return NOTIFY_OK;
76982 }
76983
76984-static struct notifier_block __cpuinitdata cpu_nfb = {
76985+static struct notifier_block cpu_nfb = {
76986 .notifier_call = cpu_callback
76987 };
76988
76989-static struct smp_hotplug_thread softirq_threads = {
76990+static struct smp_hotplug_thread softirq_threads __read_only = {
76991 .store = &ksoftirqd,
76992 .thread_should_run = ksoftirqd_should_run,
76993 .thread_fn = run_ksoftirqd,
76994diff --git a/kernel/srcu.c b/kernel/srcu.c
76995index 2b85982..d52ab26 100644
76996--- a/kernel/srcu.c
76997+++ b/kernel/srcu.c
76998@@ -305,9 +305,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
76999 preempt_disable();
77000 idx = rcu_dereference_index_check(sp->completed,
77001 rcu_read_lock_sched_held()) & 0x1;
77002- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
77003+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
77004 smp_mb(); /* B */ /* Avoid leaking the critical section. */
77005- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
77006+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
77007 preempt_enable();
77008 return idx;
77009 }
77010@@ -323,7 +323,7 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
77011 {
77012 preempt_disable();
77013 smp_mb(); /* C */ /* Avoid leaking the critical section. */
77014- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
77015+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) -= 1;
77016 preempt_enable();
77017 }
77018 EXPORT_SYMBOL_GPL(__srcu_read_unlock);
77019diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
77020index 2f194e9..2c05ea9 100644
77021--- a/kernel/stop_machine.c
77022+++ b/kernel/stop_machine.c
77023@@ -362,7 +362,7 @@ static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb,
77024 * cpu notifiers. It currently shares the same priority as sched
77025 * migration_notifier.
77026 */
77027-static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = {
77028+static struct notifier_block cpu_stop_cpu_notifier = {
77029 .notifier_call = cpu_stop_cpu_callback,
77030 .priority = 10,
77031 };
77032diff --git a/kernel/sys.c b/kernel/sys.c
77033index 265b376..4e42ef5 100644
77034--- a/kernel/sys.c
77035+++ b/kernel/sys.c
77036@@ -157,6 +157,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
77037 error = -EACCES;
77038 goto out;
77039 }
77040+
77041+ if (gr_handle_chroot_setpriority(p, niceval)) {
77042+ error = -EACCES;
77043+ goto out;
77044+ }
77045+
77046 no_nice = security_task_setnice(p, niceval);
77047 if (no_nice) {
77048 error = no_nice;
77049@@ -595,6 +601,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
77050 goto error;
77051 }
77052
77053+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
77054+ goto error;
77055+
77056 if (rgid != (gid_t) -1 ||
77057 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
77058 new->sgid = new->egid;
77059@@ -630,6 +639,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
77060 old = current_cred();
77061
77062 retval = -EPERM;
77063+
77064+ if (gr_check_group_change(kgid, kgid, kgid))
77065+ goto error;
77066+
77067 if (nsown_capable(CAP_SETGID))
77068 new->gid = new->egid = new->sgid = new->fsgid = kgid;
77069 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
77070@@ -647,7 +660,7 @@ error:
77071 /*
77072 * change the user struct in a credentials set to match the new UID
77073 */
77074-static int set_user(struct cred *new)
77075+int set_user(struct cred *new)
77076 {
77077 struct user_struct *new_user;
77078
77079@@ -727,6 +740,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
77080 goto error;
77081 }
77082
77083+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
77084+ goto error;
77085+
77086 if (!uid_eq(new->uid, old->uid)) {
77087 retval = set_user(new);
77088 if (retval < 0)
77089@@ -777,6 +793,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
77090 old = current_cred();
77091
77092 retval = -EPERM;
77093+
77094+ if (gr_check_crash_uid(kuid))
77095+ goto error;
77096+ if (gr_check_user_change(kuid, kuid, kuid))
77097+ goto error;
77098+
77099 if (nsown_capable(CAP_SETUID)) {
77100 new->suid = new->uid = kuid;
77101 if (!uid_eq(kuid, old->uid)) {
77102@@ -846,6 +868,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
77103 goto error;
77104 }
77105
77106+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
77107+ goto error;
77108+
77109 if (ruid != (uid_t) -1) {
77110 new->uid = kruid;
77111 if (!uid_eq(kruid, old->uid)) {
77112@@ -928,6 +953,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
77113 goto error;
77114 }
77115
77116+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
77117+ goto error;
77118+
77119 if (rgid != (gid_t) -1)
77120 new->gid = krgid;
77121 if (egid != (gid_t) -1)
77122@@ -981,6 +1009,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
77123 if (!uid_valid(kuid))
77124 return old_fsuid;
77125
77126+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
77127+ goto error;
77128+
77129 new = prepare_creds();
77130 if (!new)
77131 return old_fsuid;
77132@@ -995,6 +1026,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
77133 }
77134 }
77135
77136+error:
77137 abort_creds(new);
77138 return old_fsuid;
77139
77140@@ -1027,12 +1059,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
77141 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
77142 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
77143 nsown_capable(CAP_SETGID)) {
77144+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
77145+ goto error;
77146+
77147 if (!gid_eq(kgid, old->fsgid)) {
77148 new->fsgid = kgid;
77149 goto change_okay;
77150 }
77151 }
77152
77153+error:
77154 abort_creds(new);
77155 return old_fsgid;
77156
77157@@ -1340,19 +1376,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
77158 return -EFAULT;
77159
77160 down_read(&uts_sem);
77161- error = __copy_to_user(&name->sysname, &utsname()->sysname,
77162+ error = __copy_to_user(name->sysname, &utsname()->sysname,
77163 __OLD_UTS_LEN);
77164 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
77165- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
77166+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
77167 __OLD_UTS_LEN);
77168 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
77169- error |= __copy_to_user(&name->release, &utsname()->release,
77170+ error |= __copy_to_user(name->release, &utsname()->release,
77171 __OLD_UTS_LEN);
77172 error |= __put_user(0, name->release + __OLD_UTS_LEN);
77173- error |= __copy_to_user(&name->version, &utsname()->version,
77174+ error |= __copy_to_user(name->version, &utsname()->version,
77175 __OLD_UTS_LEN);
77176 error |= __put_user(0, name->version + __OLD_UTS_LEN);
77177- error |= __copy_to_user(&name->machine, &utsname()->machine,
77178+ error |= __copy_to_user(name->machine, &utsname()->machine,
77179 __OLD_UTS_LEN);
77180 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
77181 up_read(&uts_sem);
77182@@ -2026,7 +2062,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
77183 error = get_dumpable(me->mm);
77184 break;
77185 case PR_SET_DUMPABLE:
77186- if (arg2 < 0 || arg2 > 1) {
77187+ if (arg2 > 1) {
77188 error = -EINVAL;
77189 break;
77190 }
77191diff --git a/kernel/sysctl.c b/kernel/sysctl.c
77192index c88878d..e4fa5d1 100644
77193--- a/kernel/sysctl.c
77194+++ b/kernel/sysctl.c
77195@@ -92,7 +92,6 @@
77196
77197
77198 #if defined(CONFIG_SYSCTL)
77199-
77200 /* External variables not in a header file. */
77201 extern int sysctl_overcommit_memory;
77202 extern int sysctl_overcommit_ratio;
77203@@ -172,10 +171,8 @@ static int proc_taint(struct ctl_table *table, int write,
77204 void __user *buffer, size_t *lenp, loff_t *ppos);
77205 #endif
77206
77207-#ifdef CONFIG_PRINTK
77208 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
77209 void __user *buffer, size_t *lenp, loff_t *ppos);
77210-#endif
77211
77212 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
77213 void __user *buffer, size_t *lenp, loff_t *ppos);
77214@@ -206,6 +203,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
77215
77216 #endif
77217
77218+extern struct ctl_table grsecurity_table[];
77219+
77220 static struct ctl_table kern_table[];
77221 static struct ctl_table vm_table[];
77222 static struct ctl_table fs_table[];
77223@@ -220,6 +219,20 @@ extern struct ctl_table epoll_table[];
77224 int sysctl_legacy_va_layout;
77225 #endif
77226
77227+#ifdef CONFIG_PAX_SOFTMODE
77228+static ctl_table pax_table[] = {
77229+ {
77230+ .procname = "softmode",
77231+ .data = &pax_softmode,
77232+ .maxlen = sizeof(unsigned int),
77233+ .mode = 0600,
77234+ .proc_handler = &proc_dointvec,
77235+ },
77236+
77237+ { }
77238+};
77239+#endif
77240+
77241 /* The default sysctl tables: */
77242
77243 static struct ctl_table sysctl_base_table[] = {
77244@@ -268,6 +281,22 @@ static int max_extfrag_threshold = 1000;
77245 #endif
77246
77247 static struct ctl_table kern_table[] = {
77248+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
77249+ {
77250+ .procname = "grsecurity",
77251+ .mode = 0500,
77252+ .child = grsecurity_table,
77253+ },
77254+#endif
77255+
77256+#ifdef CONFIG_PAX_SOFTMODE
77257+ {
77258+ .procname = "pax",
77259+ .mode = 0500,
77260+ .child = pax_table,
77261+ },
77262+#endif
77263+
77264 {
77265 .procname = "sched_child_runs_first",
77266 .data = &sysctl_sched_child_runs_first,
77267@@ -593,7 +622,7 @@ static struct ctl_table kern_table[] = {
77268 .data = &modprobe_path,
77269 .maxlen = KMOD_PATH_LEN,
77270 .mode = 0644,
77271- .proc_handler = proc_dostring,
77272+ .proc_handler = proc_dostring_modpriv,
77273 },
77274 {
77275 .procname = "modules_disabled",
77276@@ -760,16 +789,20 @@ static struct ctl_table kern_table[] = {
77277 .extra1 = &zero,
77278 .extra2 = &one,
77279 },
77280+#endif
77281 {
77282 .procname = "kptr_restrict",
77283 .data = &kptr_restrict,
77284 .maxlen = sizeof(int),
77285 .mode = 0644,
77286 .proc_handler = proc_dointvec_minmax_sysadmin,
77287+#ifdef CONFIG_GRKERNSEC_HIDESYM
77288+ .extra1 = &two,
77289+#else
77290 .extra1 = &zero,
77291+#endif
77292 .extra2 = &two,
77293 },
77294-#endif
77295 {
77296 .procname = "ngroups_max",
77297 .data = &ngroups_max,
77298@@ -1266,6 +1299,13 @@ static struct ctl_table vm_table[] = {
77299 .proc_handler = proc_dointvec_minmax,
77300 .extra1 = &zero,
77301 },
77302+ {
77303+ .procname = "heap_stack_gap",
77304+ .data = &sysctl_heap_stack_gap,
77305+ .maxlen = sizeof(sysctl_heap_stack_gap),
77306+ .mode = 0644,
77307+ .proc_handler = proc_doulongvec_minmax,
77308+ },
77309 #else
77310 {
77311 .procname = "nr_trim_pages",
77312@@ -1716,6 +1756,16 @@ int proc_dostring(struct ctl_table *table, int write,
77313 buffer, lenp, ppos);
77314 }
77315
77316+int proc_dostring_modpriv(struct ctl_table *table, int write,
77317+ void __user *buffer, size_t *lenp, loff_t *ppos)
77318+{
77319+ if (write && !capable(CAP_SYS_MODULE))
77320+ return -EPERM;
77321+
77322+ return _proc_do_string(table->data, table->maxlen, write,
77323+ buffer, lenp, ppos);
77324+}
77325+
77326 static size_t proc_skip_spaces(char **buf)
77327 {
77328 size_t ret;
77329@@ -1821,6 +1871,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
77330 len = strlen(tmp);
77331 if (len > *size)
77332 len = *size;
77333+ if (len > sizeof(tmp))
77334+ len = sizeof(tmp);
77335 if (copy_to_user(*buf, tmp, len))
77336 return -EFAULT;
77337 *size -= len;
77338@@ -1985,7 +2037,7 @@ int proc_dointvec(struct ctl_table *table, int write,
77339 static int proc_taint(struct ctl_table *table, int write,
77340 void __user *buffer, size_t *lenp, loff_t *ppos)
77341 {
77342- struct ctl_table t;
77343+ ctl_table_no_const t;
77344 unsigned long tmptaint = get_taint();
77345 int err;
77346
77347@@ -2013,7 +2065,6 @@ static int proc_taint(struct ctl_table *table, int write,
77348 return err;
77349 }
77350
77351-#ifdef CONFIG_PRINTK
77352 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
77353 void __user *buffer, size_t *lenp, loff_t *ppos)
77354 {
77355@@ -2022,7 +2073,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
77356
77357 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
77358 }
77359-#endif
77360
77361 struct do_proc_dointvec_minmax_conv_param {
77362 int *min;
77363@@ -2169,8 +2219,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
77364 *i = val;
77365 } else {
77366 val = convdiv * (*i) / convmul;
77367- if (!first)
77368+ if (!first) {
77369 err = proc_put_char(&buffer, &left, '\t');
77370+ if (err)
77371+ break;
77372+ }
77373 err = proc_put_long(&buffer, &left, val, false);
77374 if (err)
77375 break;
77376@@ -2562,6 +2615,12 @@ int proc_dostring(struct ctl_table *table, int write,
77377 return -ENOSYS;
77378 }
77379
77380+int proc_dostring_modpriv(struct ctl_table *table, int write,
77381+ void __user *buffer, size_t *lenp, loff_t *ppos)
77382+{
77383+ return -ENOSYS;
77384+}
77385+
77386 int proc_dointvec(struct ctl_table *table, int write,
77387 void __user *buffer, size_t *lenp, loff_t *ppos)
77388 {
77389@@ -2618,5 +2677,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
77390 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
77391 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
77392 EXPORT_SYMBOL(proc_dostring);
77393+EXPORT_SYMBOL(proc_dostring_modpriv);
77394 EXPORT_SYMBOL(proc_doulongvec_minmax);
77395 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
77396diff --git a/kernel/sysctl_binary.c b/kernel/sysctl_binary.c
77397index 0ddf3a0..a199f50 100644
77398--- a/kernel/sysctl_binary.c
77399+++ b/kernel/sysctl_binary.c
77400@@ -989,7 +989,7 @@ static ssize_t bin_intvec(struct file *file,
77401 int i;
77402
77403 set_fs(KERNEL_DS);
77404- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
77405+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
77406 set_fs(old_fs);
77407 if (result < 0)
77408 goto out_kfree;
77409@@ -1034,7 +1034,7 @@ static ssize_t bin_intvec(struct file *file,
77410 }
77411
77412 set_fs(KERNEL_DS);
77413- result = vfs_write(file, buffer, str - buffer, &pos);
77414+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
77415 set_fs(old_fs);
77416 if (result < 0)
77417 goto out_kfree;
77418@@ -1067,7 +1067,7 @@ static ssize_t bin_ulongvec(struct file *file,
77419 int i;
77420
77421 set_fs(KERNEL_DS);
77422- result = vfs_read(file, buffer, BUFSZ - 1, &pos);
77423+ result = vfs_read(file, (char __force_user *)buffer, BUFSZ - 1, &pos);
77424 set_fs(old_fs);
77425 if (result < 0)
77426 goto out_kfree;
77427@@ -1112,7 +1112,7 @@ static ssize_t bin_ulongvec(struct file *file,
77428 }
77429
77430 set_fs(KERNEL_DS);
77431- result = vfs_write(file, buffer, str - buffer, &pos);
77432+ result = vfs_write(file, (const char __force_user *)buffer, str - buffer, &pos);
77433 set_fs(old_fs);
77434 if (result < 0)
77435 goto out_kfree;
77436@@ -1138,7 +1138,7 @@ static ssize_t bin_uuid(struct file *file,
77437 int i;
77438
77439 set_fs(KERNEL_DS);
77440- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
77441+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
77442 set_fs(old_fs);
77443 if (result < 0)
77444 goto out;
77445@@ -1185,7 +1185,7 @@ static ssize_t bin_dn_node_address(struct file *file,
77446 __le16 dnaddr;
77447
77448 set_fs(KERNEL_DS);
77449- result = vfs_read(file, buf, sizeof(buf) - 1, &pos);
77450+ result = vfs_read(file, (char __force_user *)buf, sizeof(buf) - 1, &pos);
77451 set_fs(old_fs);
77452 if (result < 0)
77453 goto out;
77454@@ -1234,7 +1234,7 @@ static ssize_t bin_dn_node_address(struct file *file,
77455 le16_to_cpu(dnaddr) & 0x3ff);
77456
77457 set_fs(KERNEL_DS);
77458- result = vfs_write(file, buf, len, &pos);
77459+ result = vfs_write(file, (const char __force_user *)buf, len, &pos);
77460 set_fs(old_fs);
77461 if (result < 0)
77462 goto out;
77463diff --git a/kernel/taskstats.c b/kernel/taskstats.c
77464index 145bb4d..b2aa969 100644
77465--- a/kernel/taskstats.c
77466+++ b/kernel/taskstats.c
77467@@ -28,9 +28,12 @@
77468 #include <linux/fs.h>
77469 #include <linux/file.h>
77470 #include <linux/pid_namespace.h>
77471+#include <linux/grsecurity.h>
77472 #include <net/genetlink.h>
77473 #include <linux/atomic.h>
77474
77475+extern int gr_is_taskstats_denied(int pid);
77476+
77477 /*
77478 * Maximum length of a cpumask that can be specified in
77479 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
77480@@ -570,6 +573,9 @@ err:
77481
77482 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
77483 {
77484+ if (gr_is_taskstats_denied(current->pid))
77485+ return -EACCES;
77486+
77487 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
77488 return cmd_attr_register_cpumask(info);
77489 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
77490diff --git a/kernel/time.c b/kernel/time.c
77491index d226c6a..c7c0960 100644
77492--- a/kernel/time.c
77493+++ b/kernel/time.c
77494@@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
77495 return error;
77496
77497 if (tz) {
77498+ /* we log in do_settimeofday called below, so don't log twice
77499+ */
77500+ if (!tv)
77501+ gr_log_timechange();
77502+
77503 sys_tz = *tz;
77504 update_vsyscall_tz();
77505 if (firsttime) {
77506diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
77507index f11d83b..d016d91 100644
77508--- a/kernel/time/alarmtimer.c
77509+++ b/kernel/time/alarmtimer.c
77510@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
77511 struct platform_device *pdev;
77512 int error = 0;
77513 int i;
77514- struct k_clock alarm_clock = {
77515+ static struct k_clock alarm_clock = {
77516 .clock_getres = alarm_clock_getres,
77517 .clock_get = alarm_clock_get,
77518 .timer_create = alarm_timer_create,
77519diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
77520index f113755..ec24223 100644
77521--- a/kernel/time/tick-broadcast.c
77522+++ b/kernel/time/tick-broadcast.c
77523@@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
77524 * then clear the broadcast bit.
77525 */
77526 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
77527- int cpu = smp_processor_id();
77528+ cpu = smp_processor_id();
77529
77530 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
77531 tick_broadcast_clear_oneshot(cpu);
77532diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
77533index cbc6acb..3a77191 100644
77534--- a/kernel/time/timekeeping.c
77535+++ b/kernel/time/timekeeping.c
77536@@ -15,6 +15,7 @@
77537 #include <linux/init.h>
77538 #include <linux/mm.h>
77539 #include <linux/sched.h>
77540+#include <linux/grsecurity.h>
77541 #include <linux/syscore_ops.h>
77542 #include <linux/clocksource.h>
77543 #include <linux/jiffies.h>
77544@@ -412,6 +413,8 @@ int do_settimeofday(const struct timespec *tv)
77545 if (!timespec_valid_strict(tv))
77546 return -EINVAL;
77547
77548+ gr_log_timechange();
77549+
77550 write_seqlock_irqsave(&tk->lock, flags);
77551
77552 timekeeping_forward_now(tk);
77553diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
77554index af5a7e9..715611a 100644
77555--- a/kernel/time/timer_list.c
77556+++ b/kernel/time/timer_list.c
77557@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
77558
77559 static void print_name_offset(struct seq_file *m, void *sym)
77560 {
77561+#ifdef CONFIG_GRKERNSEC_HIDESYM
77562+ SEQ_printf(m, "<%p>", NULL);
77563+#else
77564 char symname[KSYM_NAME_LEN];
77565
77566 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
77567 SEQ_printf(m, "<%pK>", sym);
77568 else
77569 SEQ_printf(m, "%s", symname);
77570+#endif
77571 }
77572
77573 static void
77574@@ -112,7 +116,11 @@ next_one:
77575 static void
77576 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
77577 {
77578+#ifdef CONFIG_GRKERNSEC_HIDESYM
77579+ SEQ_printf(m, " .base: %p\n", NULL);
77580+#else
77581 SEQ_printf(m, " .base: %pK\n", base);
77582+#endif
77583 SEQ_printf(m, " .index: %d\n",
77584 base->index);
77585 SEQ_printf(m, " .resolution: %Lu nsecs\n",
77586@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs(void)
77587 {
77588 struct proc_dir_entry *pe;
77589
77590+#ifdef CONFIG_GRKERNSEC_PROC_ADD
77591+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
77592+#else
77593 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
77594+#endif
77595 if (!pe)
77596 return -ENOMEM;
77597 return 0;
77598diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
77599index 0b537f2..40d6c20 100644
77600--- a/kernel/time/timer_stats.c
77601+++ b/kernel/time/timer_stats.c
77602@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
77603 static unsigned long nr_entries;
77604 static struct entry entries[MAX_ENTRIES];
77605
77606-static atomic_t overflow_count;
77607+static atomic_unchecked_t overflow_count;
77608
77609 /*
77610 * The entries are in a hash-table, for fast lookup:
77611@@ -140,7 +140,7 @@ static void reset_entries(void)
77612 nr_entries = 0;
77613 memset(entries, 0, sizeof(entries));
77614 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
77615- atomic_set(&overflow_count, 0);
77616+ atomic_set_unchecked(&overflow_count, 0);
77617 }
77618
77619 static struct entry *alloc_entry(void)
77620@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
77621 if (likely(entry))
77622 entry->count++;
77623 else
77624- atomic_inc(&overflow_count);
77625+ atomic_inc_unchecked(&overflow_count);
77626
77627 out_unlock:
77628 raw_spin_unlock_irqrestore(lock, flags);
77629@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
77630
77631 static void print_name_offset(struct seq_file *m, unsigned long addr)
77632 {
77633+#ifdef CONFIG_GRKERNSEC_HIDESYM
77634+ seq_printf(m, "<%p>", NULL);
77635+#else
77636 char symname[KSYM_NAME_LEN];
77637
77638 if (lookup_symbol_name(addr, symname) < 0)
77639- seq_printf(m, "<%p>", (void *)addr);
77640+ seq_printf(m, "<%pK>", (void *)addr);
77641 else
77642 seq_printf(m, "%s", symname);
77643+#endif
77644 }
77645
77646 static int tstats_show(struct seq_file *m, void *v)
77647@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
77648
77649 seq_puts(m, "Timer Stats Version: v0.2\n");
77650 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
77651- if (atomic_read(&overflow_count))
77652+ if (atomic_read_unchecked(&overflow_count))
77653 seq_printf(m, "Overflow: %d entries\n",
77654- atomic_read(&overflow_count));
77655+ atomic_read_unchecked(&overflow_count));
77656
77657 for (i = 0; i < nr_entries; i++) {
77658 entry = entries + i;
77659@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
77660 {
77661 struct proc_dir_entry *pe;
77662
77663+#ifdef CONFIG_GRKERNSEC_PROC_ADD
77664+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
77665+#else
77666 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
77667+#endif
77668 if (!pe)
77669 return -ENOMEM;
77670 return 0;
77671diff --git a/kernel/timer.c b/kernel/timer.c
77672index 367d008..1ee9ed9 100644
77673--- a/kernel/timer.c
77674+++ b/kernel/timer.c
77675@@ -1363,7 +1363,7 @@ void update_process_times(int user_tick)
77676 /*
77677 * This function runs timers and the timer-tq in bottom half context.
77678 */
77679-static void run_timer_softirq(struct softirq_action *h)
77680+static void run_timer_softirq(void)
77681 {
77682 struct tvec_base *base = __this_cpu_read(tvec_bases);
77683
77684@@ -1772,7 +1772,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
77685 return NOTIFY_OK;
77686 }
77687
77688-static struct notifier_block __cpuinitdata timers_nb = {
77689+static struct notifier_block timers_nb = {
77690 .notifier_call = timer_cpu_notify,
77691 };
77692
77693diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
77694index c0bd030..62a1927 100644
77695--- a/kernel/trace/blktrace.c
77696+++ b/kernel/trace/blktrace.c
77697@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
77698 struct blk_trace *bt = filp->private_data;
77699 char buf[16];
77700
77701- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
77702+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
77703
77704 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
77705 }
77706@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
77707 return 1;
77708
77709 bt = buf->chan->private_data;
77710- atomic_inc(&bt->dropped);
77711+ atomic_inc_unchecked(&bt->dropped);
77712 return 0;
77713 }
77714
77715@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
77716
77717 bt->dir = dir;
77718 bt->dev = dev;
77719- atomic_set(&bt->dropped, 0);
77720+ atomic_set_unchecked(&bt->dropped, 0);
77721
77722 ret = -EIO;
77723 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
77724diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
77725index 43defd1..76da436 100644
77726--- a/kernel/trace/ftrace.c
77727+++ b/kernel/trace/ftrace.c
77728@@ -1874,12 +1874,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
77729 if (unlikely(ftrace_disabled))
77730 return 0;
77731
77732+ ret = ftrace_arch_code_modify_prepare();
77733+ FTRACE_WARN_ON(ret);
77734+ if (ret)
77735+ return 0;
77736+
77737 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
77738+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
77739 if (ret) {
77740 ftrace_bug(ret, ip);
77741- return 0;
77742 }
77743- return 1;
77744+ return ret ? 0 : 1;
77745 }
77746
77747 /*
77748@@ -2965,7 +2970,7 @@ static void ftrace_free_entry_rcu(struct rcu_head *rhp)
77749
77750 int
77751 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
77752- void *data)
77753+ void *data)
77754 {
77755 struct ftrace_func_probe *entry;
77756 struct ftrace_page *pg;
77757@@ -3832,8 +3837,10 @@ static int ftrace_process_locs(struct module *mod,
77758 if (!count)
77759 return 0;
77760
77761+ pax_open_kernel();
77762 sort(start, count, sizeof(*start),
77763 ftrace_cmp_ips, ftrace_swap_ips);
77764+ pax_close_kernel();
77765
77766 start_pg = ftrace_allocate_pages(count);
77767 if (!start_pg)
77768@@ -4559,8 +4566,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
77769 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
77770
77771 static int ftrace_graph_active;
77772-static struct notifier_block ftrace_suspend_notifier;
77773-
77774 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
77775 {
77776 return 0;
77777@@ -4704,6 +4709,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
77778 return NOTIFY_DONE;
77779 }
77780
77781+static struct notifier_block ftrace_suspend_notifier = {
77782+ .notifier_call = ftrace_suspend_notifier_call
77783+};
77784+
77785 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
77786 trace_func_graph_ent_t entryfunc)
77787 {
77788@@ -4717,7 +4726,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
77789 goto out;
77790 }
77791
77792- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
77793 register_pm_notifier(&ftrace_suspend_notifier);
77794
77795 ftrace_graph_active++;
77796diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
77797index ce8514f..8233573 100644
77798--- a/kernel/trace/ring_buffer.c
77799+++ b/kernel/trace/ring_buffer.c
77800@@ -346,9 +346,9 @@ struct buffer_data_page {
77801 */
77802 struct buffer_page {
77803 struct list_head list; /* list of buffer pages */
77804- local_t write; /* index for next write */
77805+ local_unchecked_t write; /* index for next write */
77806 unsigned read; /* index for next read */
77807- local_t entries; /* entries on this page */
77808+ local_unchecked_t entries; /* entries on this page */
77809 unsigned long real_end; /* real end of data */
77810 struct buffer_data_page *page; /* Actual data page */
77811 };
77812@@ -461,8 +461,8 @@ struct ring_buffer_per_cpu {
77813 unsigned long last_overrun;
77814 local_t entries_bytes;
77815 local_t entries;
77816- local_t overrun;
77817- local_t commit_overrun;
77818+ local_unchecked_t overrun;
77819+ local_unchecked_t commit_overrun;
77820 local_t dropped_events;
77821 local_t committing;
77822 local_t commits;
77823@@ -861,8 +861,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
77824 *
77825 * We add a counter to the write field to denote this.
77826 */
77827- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
77828- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
77829+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
77830+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
77831
77832 /*
77833 * Just make sure we have seen our old_write and synchronize
77834@@ -890,8 +890,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
77835 * cmpxchg to only update if an interrupt did not already
77836 * do it for us. If the cmpxchg fails, we don't care.
77837 */
77838- (void)local_cmpxchg(&next_page->write, old_write, val);
77839- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
77840+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
77841+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
77842
77843 /*
77844 * No need to worry about races with clearing out the commit.
77845@@ -1250,12 +1250,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
77846
77847 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
77848 {
77849- return local_read(&bpage->entries) & RB_WRITE_MASK;
77850+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
77851 }
77852
77853 static inline unsigned long rb_page_write(struct buffer_page *bpage)
77854 {
77855- return local_read(&bpage->write) & RB_WRITE_MASK;
77856+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
77857 }
77858
77859 static int
77860@@ -1350,7 +1350,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
77861 * bytes consumed in ring buffer from here.
77862 * Increment overrun to account for the lost events.
77863 */
77864- local_add(page_entries, &cpu_buffer->overrun);
77865+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
77866 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
77867 }
77868
77869@@ -1906,7 +1906,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
77870 * it is our responsibility to update
77871 * the counters.
77872 */
77873- local_add(entries, &cpu_buffer->overrun);
77874+ local_add_unchecked(entries, &cpu_buffer->overrun);
77875 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
77876
77877 /*
77878@@ -2056,7 +2056,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
77879 if (tail == BUF_PAGE_SIZE)
77880 tail_page->real_end = 0;
77881
77882- local_sub(length, &tail_page->write);
77883+ local_sub_unchecked(length, &tail_page->write);
77884 return;
77885 }
77886
77887@@ -2091,7 +2091,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
77888 rb_event_set_padding(event);
77889
77890 /* Set the write back to the previous setting */
77891- local_sub(length, &tail_page->write);
77892+ local_sub_unchecked(length, &tail_page->write);
77893 return;
77894 }
77895
77896@@ -2103,7 +2103,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
77897
77898 /* Set write to end of buffer */
77899 length = (tail + length) - BUF_PAGE_SIZE;
77900- local_sub(length, &tail_page->write);
77901+ local_sub_unchecked(length, &tail_page->write);
77902 }
77903
77904 /*
77905@@ -2129,7 +2129,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
77906 * about it.
77907 */
77908 if (unlikely(next_page == commit_page)) {
77909- local_inc(&cpu_buffer->commit_overrun);
77910+ local_inc_unchecked(&cpu_buffer->commit_overrun);
77911 goto out_reset;
77912 }
77913
77914@@ -2185,7 +2185,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
77915 cpu_buffer->tail_page) &&
77916 (cpu_buffer->commit_page ==
77917 cpu_buffer->reader_page))) {
77918- local_inc(&cpu_buffer->commit_overrun);
77919+ local_inc_unchecked(&cpu_buffer->commit_overrun);
77920 goto out_reset;
77921 }
77922 }
77923@@ -2233,7 +2233,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
77924 length += RB_LEN_TIME_EXTEND;
77925
77926 tail_page = cpu_buffer->tail_page;
77927- write = local_add_return(length, &tail_page->write);
77928+ write = local_add_return_unchecked(length, &tail_page->write);
77929
77930 /* set write to only the index of the write */
77931 write &= RB_WRITE_MASK;
77932@@ -2250,7 +2250,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
77933 kmemcheck_annotate_bitfield(event, bitfield);
77934 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
77935
77936- local_inc(&tail_page->entries);
77937+ local_inc_unchecked(&tail_page->entries);
77938
77939 /*
77940 * If this is the first commit on the page, then update
77941@@ -2283,7 +2283,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
77942
77943 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
77944 unsigned long write_mask =
77945- local_read(&bpage->write) & ~RB_WRITE_MASK;
77946+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
77947 unsigned long event_length = rb_event_length(event);
77948 /*
77949 * This is on the tail page. It is possible that
77950@@ -2293,7 +2293,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
77951 */
77952 old_index += write_mask;
77953 new_index += write_mask;
77954- index = local_cmpxchg(&bpage->write, old_index, new_index);
77955+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
77956 if (index == old_index) {
77957 /* update counters */
77958 local_sub(event_length, &cpu_buffer->entries_bytes);
77959@@ -2632,7 +2632,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
77960
77961 /* Do the likely case first */
77962 if (likely(bpage->page == (void *)addr)) {
77963- local_dec(&bpage->entries);
77964+ local_dec_unchecked(&bpage->entries);
77965 return;
77966 }
77967
77968@@ -2644,7 +2644,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
77969 start = bpage;
77970 do {
77971 if (bpage->page == (void *)addr) {
77972- local_dec(&bpage->entries);
77973+ local_dec_unchecked(&bpage->entries);
77974 return;
77975 }
77976 rb_inc_page(cpu_buffer, &bpage);
77977@@ -2926,7 +2926,7 @@ static inline unsigned long
77978 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
77979 {
77980 return local_read(&cpu_buffer->entries) -
77981- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
77982+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
77983 }
77984
77985 /**
77986@@ -3015,7 +3015,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
77987 return 0;
77988
77989 cpu_buffer = buffer->buffers[cpu];
77990- ret = local_read(&cpu_buffer->overrun);
77991+ ret = local_read_unchecked(&cpu_buffer->overrun);
77992
77993 return ret;
77994 }
77995@@ -3038,7 +3038,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
77996 return 0;
77997
77998 cpu_buffer = buffer->buffers[cpu];
77999- ret = local_read(&cpu_buffer->commit_overrun);
78000+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
78001
78002 return ret;
78003 }
78004@@ -3105,7 +3105,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
78005 /* if you care about this being correct, lock the buffer */
78006 for_each_buffer_cpu(buffer, cpu) {
78007 cpu_buffer = buffer->buffers[cpu];
78008- overruns += local_read(&cpu_buffer->overrun);
78009+ overruns += local_read_unchecked(&cpu_buffer->overrun);
78010 }
78011
78012 return overruns;
78013@@ -3281,8 +3281,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
78014 /*
78015 * Reset the reader page to size zero.
78016 */
78017- local_set(&cpu_buffer->reader_page->write, 0);
78018- local_set(&cpu_buffer->reader_page->entries, 0);
78019+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
78020+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
78021 local_set(&cpu_buffer->reader_page->page->commit, 0);
78022 cpu_buffer->reader_page->real_end = 0;
78023
78024@@ -3316,7 +3316,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
78025 * want to compare with the last_overrun.
78026 */
78027 smp_mb();
78028- overwrite = local_read(&(cpu_buffer->overrun));
78029+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
78030
78031 /*
78032 * Here's the tricky part.
78033@@ -3886,8 +3886,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
78034
78035 cpu_buffer->head_page
78036 = list_entry(cpu_buffer->pages, struct buffer_page, list);
78037- local_set(&cpu_buffer->head_page->write, 0);
78038- local_set(&cpu_buffer->head_page->entries, 0);
78039+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
78040+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
78041 local_set(&cpu_buffer->head_page->page->commit, 0);
78042
78043 cpu_buffer->head_page->read = 0;
78044@@ -3897,14 +3897,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
78045
78046 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
78047 INIT_LIST_HEAD(&cpu_buffer->new_pages);
78048- local_set(&cpu_buffer->reader_page->write, 0);
78049- local_set(&cpu_buffer->reader_page->entries, 0);
78050+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
78051+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
78052 local_set(&cpu_buffer->reader_page->page->commit, 0);
78053 cpu_buffer->reader_page->read = 0;
78054
78055 local_set(&cpu_buffer->entries_bytes, 0);
78056- local_set(&cpu_buffer->overrun, 0);
78057- local_set(&cpu_buffer->commit_overrun, 0);
78058+ local_set_unchecked(&cpu_buffer->overrun, 0);
78059+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
78060 local_set(&cpu_buffer->dropped_events, 0);
78061 local_set(&cpu_buffer->entries, 0);
78062 local_set(&cpu_buffer->committing, 0);
78063@@ -4308,8 +4308,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
78064 rb_init_page(bpage);
78065 bpage = reader->page;
78066 reader->page = *data_page;
78067- local_set(&reader->write, 0);
78068- local_set(&reader->entries, 0);
78069+ local_set_unchecked(&reader->write, 0);
78070+ local_set_unchecked(&reader->entries, 0);
78071 reader->read = 0;
78072 *data_page = bpage;
78073
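The local_* to local_*_unchecked conversions above follow the PAX_REFCOUNT convention: counters whose wrap-around is harmless by design (the ring buffer masks write with RB_WRITE_MASK anyway) get a separate type so the signed-overflow trap applied to ordinary local_t/atomic_t arithmetic is skipped for them. A minimal sketch of the shape of such a type, assuming a generic fallback rather than the per-architecture assembly the patch actually provides:

/* Sketch only: the names follow the patch's convention, the body is a
 * portable stand-in for the real per-arch implementation. */
typedef struct {
	volatile long counter;
} local_unchecked_t;

static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
{
	/* plain two's-complement add, deliberately free to wrap */
	return __sync_add_and_fetch(&l->counter, i);
}
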
78074diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
78075index 3c13e46..883d039 100644
78076--- a/kernel/trace/trace.c
78077+++ b/kernel/trace/trace.c
78078@@ -4465,10 +4465,9 @@ static const struct file_operations tracing_dyn_info_fops = {
78079 };
78080 #endif
78081
78082-static struct dentry *d_tracer;
78083-
78084 struct dentry *tracing_init_dentry(void)
78085 {
78086+ static struct dentry *d_tracer;
78087 static int once;
78088
78089 if (d_tracer)
78090@@ -4488,10 +4487,9 @@ struct dentry *tracing_init_dentry(void)
78091 return d_tracer;
78092 }
78093
78094-static struct dentry *d_percpu;
78095-
78096 struct dentry *tracing_dentry_percpu(void)
78097 {
78098+ static struct dentry *d_percpu;
78099 static int once;
78100 struct dentry *d_tracer;
78101
78102diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
78103index 880073d..42db7c3 100644
78104--- a/kernel/trace/trace_events.c
78105+++ b/kernel/trace/trace_events.c
78106@@ -1330,10 +1330,6 @@ static LIST_HEAD(ftrace_module_file_list);
78107 struct ftrace_module_file_ops {
78108 struct list_head list;
78109 struct module *mod;
78110- struct file_operations id;
78111- struct file_operations enable;
78112- struct file_operations format;
78113- struct file_operations filter;
78114 };
78115
78116 static struct ftrace_module_file_ops *
78117@@ -1354,17 +1350,12 @@ trace_create_file_ops(struct module *mod)
78118
78119 file_ops->mod = mod;
78120
78121- file_ops->id = ftrace_event_id_fops;
78122- file_ops->id.owner = mod;
78123-
78124- file_ops->enable = ftrace_enable_fops;
78125- file_ops->enable.owner = mod;
78126-
78127- file_ops->filter = ftrace_event_filter_fops;
78128- file_ops->filter.owner = mod;
78129-
78130- file_ops->format = ftrace_event_format_fops;
78131- file_ops->format.owner = mod;
78132+ pax_open_kernel();
78133+ mod->trace_id.owner = mod;
78134+ mod->trace_enable.owner = mod;
78135+ mod->trace_filter.owner = mod;
78136+ mod->trace_format.owner = mod;
78137+ pax_close_kernel();
78138
78139 list_add(&file_ops->list, &ftrace_module_file_list);
78140
78141@@ -1388,8 +1379,8 @@ static void trace_module_add_events(struct module *mod)
78142
78143 for_each_event(call, start, end) {
78144 __trace_add_event_call(*call, mod,
78145- &file_ops->id, &file_ops->enable,
78146- &file_ops->filter, &file_ops->format);
78147+ &mod->trace_id, &mod->trace_enable,
78148+ &mod->trace_filter, &mod->trace_format);
78149 }
78150 }
78151
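With the constify plugin, structures consisting only of function pointers (file_operations included) become read-only, so keeping writable per-module copies inside ftrace_module_file_ops no longer works; the copies move into struct module (fields added elsewhere in this patch), and the one member that must be set at load time, .owner, is written inside a pax_open_kernel()/pax_close_kernel() window that temporarily lifts the write protection. A hedged sketch of that idiom, using a made-up structure:

/* Hypothetical example of the open/close-kernel write window; the
 * helper names are the patch's, the structure is invented here. */
static struct file_operations example_fops __read_only = {
	.read = example_read,
};

static void example_set_owner(struct module *mod)
{
	pax_open_kernel();	/* e.g. toggle CR0.WP under KERNEXEC */
	/* lvalue cast defeats the plugin-applied const qualifier */
	*(struct module **)&example_fops.owner = mod;
	pax_close_kernel();	/* restore write protection */
}
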
78152diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
78153index fd3c8aa..5f324a6 100644
78154--- a/kernel/trace/trace_mmiotrace.c
78155+++ b/kernel/trace/trace_mmiotrace.c
78156@@ -24,7 +24,7 @@ struct header_iter {
78157 static struct trace_array *mmio_trace_array;
78158 static bool overrun_detected;
78159 static unsigned long prev_overruns;
78160-static atomic_t dropped_count;
78161+static atomic_unchecked_t dropped_count;
78162
78163 static void mmio_reset_data(struct trace_array *tr)
78164 {
78165@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
78166
78167 static unsigned long count_overruns(struct trace_iterator *iter)
78168 {
78169- unsigned long cnt = atomic_xchg(&dropped_count, 0);
78170+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
78171 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
78172
78173 if (over > prev_overruns)
78174@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
78175 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
78176 sizeof(*entry), 0, pc);
78177 if (!event) {
78178- atomic_inc(&dropped_count);
78179+ atomic_inc_unchecked(&dropped_count);
78180 return;
78181 }
78182 entry = ring_buffer_event_data(event);
78183@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
78184 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
78185 sizeof(*entry), 0, pc);
78186 if (!event) {
78187- atomic_inc(&dropped_count);
78188+ atomic_inc_unchecked(&dropped_count);
78189 return;
78190 }
78191 entry = ring_buffer_event_data(event);
78192diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
78193index 194d796..76edb8f 100644
78194--- a/kernel/trace/trace_output.c
78195+++ b/kernel/trace/trace_output.c
78196@@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
78197
78198 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
78199 if (!IS_ERR(p)) {
78200- p = mangle_path(s->buffer + s->len, p, "\n");
78201+ p = mangle_path(s->buffer + s->len, p, "\n\\");
78202 if (p) {
78203 s->len = p - s->buffer;
78204 return 1;
78205@@ -852,14 +852,16 @@ int register_ftrace_event(struct trace_event *event)
78206 goto out;
78207 }
78208
78209+ pax_open_kernel();
78210 if (event->funcs->trace == NULL)
78211- event->funcs->trace = trace_nop_print;
78212+ *(void **)&event->funcs->trace = trace_nop_print;
78213 if (event->funcs->raw == NULL)
78214- event->funcs->raw = trace_nop_print;
78215+ *(void **)&event->funcs->raw = trace_nop_print;
78216 if (event->funcs->hex == NULL)
78217- event->funcs->hex = trace_nop_print;
78218+ *(void **)&event->funcs->hex = trace_nop_print;
78219 if (event->funcs->binary == NULL)
78220- event->funcs->binary = trace_nop_print;
78221+ *(void **)&event->funcs->binary = trace_nop_print;
78222+ pax_close_kernel();
78223
78224 key = event->type & (EVENT_HASHSIZE - 1);
78225
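Two things worth noting in this hunk: the assignments go through *(void **)&event->funcs->trace because event->funcs points at a structure the constify plugin treats as const, so the write needs both the lvalue cast and the pax_open_kernel() window (the same idiom as in trace_events.c above); and mangle_path() gains "\\" in its escape set, so a literal backslash in a d_path() result is itself escaped and cannot be confused with an escape sequence in the trace output.
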
78226diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
78227index 42ca822..cdcacc6 100644
78228--- a/kernel/trace/trace_stack.c
78229+++ b/kernel/trace/trace_stack.c
78230@@ -52,7 +52,7 @@ static inline void check_stack(void)
78231 return;
78232
78233 /* we do not handle interrupt stacks yet */
78234- if (!object_is_on_stack(&this_size))
78235+ if (!object_starts_on_stack(&this_size))
78236 return;
78237
78238 local_irq_save(flags);
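
The rename to object_starts_on_stack makes the existing semantics explicit: only the object's first byte is tested, which matters once other parts of this patch (the usercopy checks in particular) care about objects that straddle the stack boundary. For reference, the check being renamed is essentially the stock object_is_on_stack():

/* Sketch of what the renamed helper tests: start address only. */
static inline int object_starts_on_stack(const void *obj)
{
	void *stack = task_stack_page(current);

	return (obj >= stack) && (obj < (stack + THREAD_SIZE));
}
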
78239diff --git a/kernel/user.c b/kernel/user.c
78240index 33acb5e..57ebfd4 100644
78241--- a/kernel/user.c
78242+++ b/kernel/user.c
78243@@ -47,9 +47,7 @@ struct user_namespace init_user_ns = {
78244 .count = 4294967295U,
78245 },
78246 },
78247- .kref = {
78248- .refcount = ATOMIC_INIT(3),
78249- },
78250+ .count = ATOMIC_INIT(3),
78251 .owner = GLOBAL_ROOT_UID,
78252 .group = GLOBAL_ROOT_GID,
78253 .proc_inum = PROC_USER_INIT_INO,
78254diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
78255index 2b042c4..24f8ec3 100644
78256--- a/kernel/user_namespace.c
78257+++ b/kernel/user_namespace.c
78258@@ -78,7 +78,7 @@ int create_user_ns(struct cred *new)
78259 return ret;
78260 }
78261
78262- kref_init(&ns->kref);
78263+ atomic_set(&ns->count, 1);
78264 /* Leave the new->user_ns reference with the new user namespace. */
78265 ns->parent = parent_ns;
78266 ns->owner = owner;
78267@@ -104,15 +104,16 @@ int unshare_userns(unsigned long unshare_flags, struct cred **new_cred)
78268 return create_user_ns(cred);
78269 }
78270
78271-void free_user_ns(struct kref *kref)
78272+void free_user_ns(struct user_namespace *ns)
78273 {
78274- struct user_namespace *parent, *ns =
78275- container_of(kref, struct user_namespace, kref);
78276+ struct user_namespace *parent;
78277
78278- parent = ns->parent;
78279- proc_free_inum(ns->proc_inum);
78280- kmem_cache_free(user_ns_cachep, ns);
78281- put_user_ns(parent);
78282+ do {
78283+ parent = ns->parent;
78284+ proc_free_inum(ns->proc_inum);
78285+ kmem_cache_free(user_ns_cachep, ns);
78286+ ns = parent;
78287+ } while (atomic_dec_and_test(&parent->count));
78288 }
78289 EXPORT_SYMBOL(free_user_ns);
78290
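This pair of hunks converts the user-namespace refcount from a kref to a bare atomic_t and, more importantly, turns the recursive parent teardown into a loop, so releasing a deeply nested chain of namespaces cannot exhaust the kernel stack. The matching acquire/release helpers would look roughly like this (a sketch; the real ones live in include/linux/user_namespace.h, which the patch updates alongside):

static inline struct user_namespace *get_user_ns(struct user_namespace *ns)
{
	if (ns)
		atomic_inc(&ns->count);
	return ns;
}

static inline void put_user_ns(struct user_namespace *ns)
{
	if (ns && atomic_dec_and_test(&ns->count))
		free_user_ns(ns);	/* iterative chain teardown above */
}
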
78291diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
78292index 63da38c..639904e 100644
78293--- a/kernel/utsname_sysctl.c
78294+++ b/kernel/utsname_sysctl.c
78295@@ -46,7 +46,7 @@ static void put_uts(ctl_table *table, int write, void *which)
78296 static int proc_do_uts_string(ctl_table *table, int write,
78297 void __user *buffer, size_t *lenp, loff_t *ppos)
78298 {
78299- struct ctl_table uts_table;
78300+ ctl_table_no_const uts_table;
78301 int r;
78302 memcpy(&uts_table, table, sizeof(uts_table));
78303 uts_table.data = get_uts(table, write);
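
ctl_table_no_const is the escape hatch used throughout this patch wherever a sysctl handler needs a scratch copy of a table the constify plugin has made read-only: declare the copy with the non-const typedef, fill it from the template, and hand it to the generic proc helpers. In sketch form, with a hypothetical value:

/* Sketch of the pattern; ctl_table_no_const is a typedef the patch
 * adds that matches struct ctl_table minus the plugin-applied const. */
static int example_value;

static int example_handler(ctl_table *table, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	ctl_table_no_const tmp = *table;	/* writable stack copy */

	tmp.data = &example_value;		/* safe to modify */
	return proc_dointvec(&tmp, write, buffer, lenp, ppos);
}
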
78304diff --git a/kernel/watchdog.c b/kernel/watchdog.c
78305index 75a2ab3..5961da7 100644
78306--- a/kernel/watchdog.c
78307+++ b/kernel/watchdog.c
78308@@ -527,7 +527,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
78309 }
78310 #endif /* CONFIG_SYSCTL */
78311
78312-static struct smp_hotplug_thread watchdog_threads = {
78313+static struct smp_hotplug_thread watchdog_threads __read_only = {
78314 .store = &softlockup_watchdog,
78315 .thread_should_run = watchdog_should_run,
78316 .thread_fn = watchdog,
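
__read_only places the watchdog thread descriptor in a data section that is write-protected once boot completes; the smpboot core only reads it at runtime, so no pax_open_kernel() window is needed here. Assuming the usual definition the patch adds elsewhere (the section name is from memory and may differ):

#define __read_only	__attribute__((__section__(".data..read_only")))
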
78317diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
78318index 67604e5..fe94fb1 100644
78319--- a/lib/Kconfig.debug
78320+++ b/lib/Kconfig.debug
78321@@ -550,7 +550,7 @@ config DEBUG_MUTEXES
78322
78323 config DEBUG_LOCK_ALLOC
78324 bool "Lock debugging: detect incorrect freeing of live locks"
78325- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
78326+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
78327 select DEBUG_SPINLOCK
78328 select DEBUG_MUTEXES
78329 select LOCKDEP
78330@@ -564,7 +564,7 @@ config DEBUG_LOCK_ALLOC
78331
78332 config PROVE_LOCKING
78333 bool "Lock debugging: prove locking correctness"
78334- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
78335+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
78336 select LOCKDEP
78337 select DEBUG_SPINLOCK
78338 select DEBUG_MUTEXES
78339@@ -670,7 +670,7 @@ config LOCKDEP
78340
78341 config LOCK_STAT
78342 bool "Lock usage statistics"
78343- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
78344+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
78345 select LOCKDEP
78346 select DEBUG_SPINLOCK
78347 select DEBUG_MUTEXES
78348@@ -1278,6 +1278,7 @@ config LATENCYTOP
78349 depends on DEBUG_KERNEL
78350 depends on STACKTRACE_SUPPORT
78351 depends on PROC_FS
78352+ depends on !GRKERNSEC_HIDESYM
78353 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
78354 select KALLSYMS
78355 select KALLSYMS_ALL
78356@@ -1306,7 +1307,7 @@ config INTERVAL_TREE_TEST
78357
78358 config PROVIDE_OHCI1394_DMA_INIT
78359 bool "Remote debugging over FireWire early on boot"
78360- depends on PCI && X86
78361+ depends on PCI && X86 && !GRKERNSEC
78362 help
78363 If you want to debug problems which hang or crash the kernel early
78364 on boot and the crashing machine has a FireWire port, you can use
78365@@ -1335,7 +1336,7 @@ config PROVIDE_OHCI1394_DMA_INIT
78366
78367 config FIREWIRE_OHCI_REMOTE_DMA
78368 bool "Remote debugging over FireWire with firewire-ohci"
78369- depends on FIREWIRE_OHCI
78370+ depends on FIREWIRE_OHCI && !GRKERNSEC
78371 help
78372 This option lets you use the FireWire bus for remote debugging
78373 with help of the firewire-ohci driver. It enables unfiltered
78374diff --git a/lib/Makefile b/lib/Makefile
78375index 02ed6c0..bd243da 100644
78376--- a/lib/Makefile
78377+++ b/lib/Makefile
78378@@ -47,7 +47,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
78379
78380 obj-$(CONFIG_BTREE) += btree.o
78381 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
78382-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
78383+obj-y += list_debug.o
78384 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
78385
78386 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
78387diff --git a/lib/bitmap.c b/lib/bitmap.c
78388index 06f7e4f..f3cf2b0 100644
78389--- a/lib/bitmap.c
78390+++ b/lib/bitmap.c
78391@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
78392 {
78393 int c, old_c, totaldigits, ndigits, nchunks, nbits;
78394 u32 chunk;
78395- const char __user __force *ubuf = (const char __user __force *)buf;
78396+ const char __user *ubuf = (const char __force_user *)buf;
78397
78398 bitmap_zero(maskp, nmaskbits);
78399
78400@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
78401 {
78402 if (!access_ok(VERIFY_READ, ubuf, ulen))
78403 return -EFAULT;
78404- return __bitmap_parse((const char __force *)ubuf,
78405+ return __bitmap_parse((const char __force_kernel *)ubuf,
78406 ulen, 1, maskp, nmaskbits);
78407
78408 }
78409@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
78410 {
78411 unsigned a, b;
78412 int c, old_c, totaldigits;
78413- const char __user __force *ubuf = (const char __user __force *)buf;
78414+ const char __user *ubuf = (const char __force_user *)buf;
78415 int exp_digit, in_range;
78416
78417 totaldigits = c = 0;
78418@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
78419 {
78420 if (!access_ok(VERIFY_READ, ubuf, ulen))
78421 return -EFAULT;
78422- return __bitmap_parselist((const char __force *)ubuf,
78423+ return __bitmap_parselist((const char __force_kernel *)ubuf,
78424 ulen, 1, maskp, nmaskbits);
78425 }
78426 EXPORT_SYMBOL(bitmap_parselist_user);
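
The __force_user/__force_kernel annotations replace bare __force casts so sparse still sees which address space the pointer is moving into; the combined "__user __force" form the old code used silences the checker entirely. The patch defines them approximately as:

/* From the patch's compiler.h changes, approximately: */
# define __force_user	__force __user
# define __force_kernel	__force __kernel
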
78427diff --git a/lib/bug.c b/lib/bug.c
78428index d0cdf14..4d07bd2 100644
78429--- a/lib/bug.c
78430+++ b/lib/bug.c
78431@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
78432 return BUG_TRAP_TYPE_NONE;
78433
78434 bug = find_bug(bugaddr);
78435+ if (!bug)
78436+ return BUG_TRAP_TYPE_NONE;
78437
78438 file = NULL;
78439 line = 0;
78440diff --git a/lib/debugobjects.c b/lib/debugobjects.c
78441index d11808c..dc2d6f8 100644
78442--- a/lib/debugobjects.c
78443+++ b/lib/debugobjects.c
78444@@ -287,7 +287,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
78445 if (limit > 4)
78446 return;
78447
78448- is_on_stack = object_is_on_stack(addr);
78449+ is_on_stack = object_starts_on_stack(addr);
78450 if (is_on_stack == onstack)
78451 return;
78452
78453diff --git a/lib/devres.c b/lib/devres.c
78454index 80b9c76..9e32279 100644
78455--- a/lib/devres.c
78456+++ b/lib/devres.c
78457@@ -80,7 +80,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
78458 void devm_iounmap(struct device *dev, void __iomem *addr)
78459 {
78460 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
78461- (void *)addr));
78462+ (void __force *)addr));
78463 iounmap(addr);
78464 }
78465 EXPORT_SYMBOL(devm_iounmap);
78466@@ -192,7 +192,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
78467 {
78468 ioport_unmap(addr);
78469 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
78470- devm_ioport_map_match, (void *)addr));
78471+ devm_ioport_map_match, (void __force *)addr));
78472 }
78473 EXPORT_SYMBOL(devm_ioport_unmap);
78474
78475diff --git a/lib/dma-debug.c b/lib/dma-debug.c
78476index 5e396ac..58d5de1 100644
78477--- a/lib/dma-debug.c
78478+++ b/lib/dma-debug.c
78479@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
78480
78481 void dma_debug_add_bus(struct bus_type *bus)
78482 {
78483- struct notifier_block *nb;
78484+ notifier_block_no_const *nb;
78485
78486 if (global_disable)
78487 return;
78488@@ -942,7 +942,7 @@ out:
78489
78490 static void check_for_stack(struct device *dev, void *addr)
78491 {
78492- if (object_is_on_stack(addr))
78493+ if (object_starts_on_stack(addr))
78494 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
78495 "stack [addr=%p]\n", addr);
78496 }
78497diff --git a/lib/inflate.c b/lib/inflate.c
78498index 013a761..c28f3fc 100644
78499--- a/lib/inflate.c
78500+++ b/lib/inflate.c
78501@@ -269,7 +269,7 @@ static void free(void *where)
78502 malloc_ptr = free_mem_ptr;
78503 }
78504 #else
78505-#define malloc(a) kmalloc(a, GFP_KERNEL)
78506+#define malloc(a) kmalloc((a), GFP_KERNEL)
78507 #define free(a) kfree(a)
78508 #endif
78509
78510diff --git a/lib/ioremap.c b/lib/ioremap.c
78511index 0c9216c..863bd89 100644
78512--- a/lib/ioremap.c
78513+++ b/lib/ioremap.c
78514@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
78515 unsigned long next;
78516
78517 phys_addr -= addr;
78518- pmd = pmd_alloc(&init_mm, pud, addr);
78519+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
78520 if (!pmd)
78521 return -ENOMEM;
78522 do {
78523@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
78524 unsigned long next;
78525
78526 phys_addr -= addr;
78527- pud = pud_alloc(&init_mm, pgd, addr);
78528+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
78529 if (!pud)
78530 return -ENOMEM;
78531 do {
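
With PAX_PER_CPU_PGD every task-visible PGD is a per-CPU clone, so page-table levels for kernel-only mappings such as ioremap must be instantiated in init_mm through a dedicated pmd_alloc_kernel()/pud_alloc_kernel() path that all the clones share. The mm/memory.c hunks later in this patch apply the same split inside apply_to_page_range(), dispatching on the mm:

/* The dispatch used later in mm/memory.c, for reference: */
pmd = (mm == &init_mm) ? pmd_alloc_kernel(mm, pud, addr)
		       : pmd_alloc(mm, pud, addr);
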
78532diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
78533index bd2bea9..6b3c95e 100644
78534--- a/lib/is_single_threaded.c
78535+++ b/lib/is_single_threaded.c
78536@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
78537 struct task_struct *p, *t;
78538 bool ret;
78539
78540+ if (!mm)
78541+ return true;
78542+
78543 if (atomic_read(&task->signal->live) != 1)
78544 return false;
78545
78546diff --git a/lib/kobject.c b/lib/kobject.c
78547index e07ee1f..998489d 100644
78548--- a/lib/kobject.c
78549+++ b/lib/kobject.c
78550@@ -852,9 +852,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
78551
78552
78553 static DEFINE_SPINLOCK(kobj_ns_type_lock);
78554-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
78555+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
78556
78557-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
78558+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
78559 {
78560 enum kobj_ns_type type = ops->type;
78561 int error;
78562diff --git a/lib/list_debug.c b/lib/list_debug.c
78563index c24c2f7..0475b78 100644
78564--- a/lib/list_debug.c
78565+++ b/lib/list_debug.c
78566@@ -11,7 +11,9 @@
78567 #include <linux/bug.h>
78568 #include <linux/kernel.h>
78569 #include <linux/rculist.h>
78570+#include <linux/mm.h>
78571
78572+#ifdef CONFIG_DEBUG_LIST
78573 /*
78574 * Insert a new entry between two known consecutive entries.
78575 *
78576@@ -19,21 +21,32 @@
78577 * the prev/next entries already!
78578 */
78579
78580-void __list_add(struct list_head *new,
78581- struct list_head *prev,
78582- struct list_head *next)
78583+static bool __list_add_debug(struct list_head *new,
78584+ struct list_head *prev,
78585+ struct list_head *next)
78586 {
78587- WARN(next->prev != prev,
78588+ if (WARN(next->prev != prev,
78589 "list_add corruption. next->prev should be "
78590 "prev (%p), but was %p. (next=%p).\n",
78591- prev, next->prev, next);
78592- WARN(prev->next != next,
78593+ prev, next->prev, next) ||
78594+ WARN(prev->next != next,
78595 "list_add corruption. prev->next should be "
78596 "next (%p), but was %p. (prev=%p).\n",
78597- next, prev->next, prev);
78598- WARN(new == prev || new == next,
78599+ next, prev->next, prev) ||
78600+ WARN(new == prev || new == next,
78601 "list_add double add: new=%p, prev=%p, next=%p.\n",
78602- new, prev, next);
78603+ new, prev, next))
78604+ return false;
78605+ return true;
78606+}
78607+
78608+void __list_add(struct list_head *new,
78609+ struct list_head *prev,
78610+ struct list_head *next)
78611+{
78612+ if (!__list_add_debug(new, prev, next))
78613+ return;
78614+
78615 next->prev = new;
78616 new->next = next;
78617 new->prev = prev;
78618@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
78619 }
78620 EXPORT_SYMBOL(__list_add);
78621
78622-void __list_del_entry(struct list_head *entry)
78623+static bool __list_del_entry_debug(struct list_head *entry)
78624 {
78625 struct list_head *prev, *next;
78626
78627@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
78628 WARN(next->prev != entry,
78629 "list_del corruption. next->prev should be %p, "
78630 "but was %p\n", entry, next->prev))
78631+ return false;
78632+ return true;
78633+}
78634+
78635+void __list_del_entry(struct list_head *entry)
78636+{
78637+ if (!__list_del_entry_debug(entry))
78638 return;
78639
78640- __list_del(prev, next);
78641+ __list_del(entry->prev, entry->next);
78642 }
78643 EXPORT_SYMBOL(__list_del_entry);
78644
78645@@ -86,15 +106,85 @@ EXPORT_SYMBOL(list_del);
78646 void __list_add_rcu(struct list_head *new,
78647 struct list_head *prev, struct list_head *next)
78648 {
78649- WARN(next->prev != prev,
78650- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
78651- prev, next->prev, next);
78652- WARN(prev->next != next,
78653- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
78654- next, prev->next, prev);
78655+ if (!__list_add_debug(new, prev, next))
78656+ return;
78657+
78658 new->next = next;
78659 new->prev = prev;
78660 rcu_assign_pointer(list_next_rcu(prev), new);
78661 next->prev = new;
78662 }
78663 EXPORT_SYMBOL(__list_add_rcu);
78664+#endif
78665+
78666+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
78667+{
78668+#ifdef CONFIG_DEBUG_LIST
78669+ if (!__list_add_debug(new, prev, next))
78670+ return;
78671+#endif
78672+
78673+ pax_open_kernel();
78674+ next->prev = new;
78675+ new->next = next;
78676+ new->prev = prev;
78677+ prev->next = new;
78678+ pax_close_kernel();
78679+}
78680+EXPORT_SYMBOL(__pax_list_add);
78681+
78682+void pax_list_del(struct list_head *entry)
78683+{
78684+#ifdef CONFIG_DEBUG_LIST
78685+ if (!__list_del_entry_debug(entry))
78686+ return;
78687+#endif
78688+
78689+ pax_open_kernel();
78690+ __list_del(entry->prev, entry->next);
78691+ entry->next = LIST_POISON1;
78692+ entry->prev = LIST_POISON2;
78693+ pax_close_kernel();
78694+}
78695+EXPORT_SYMBOL(pax_list_del);
78696+
78697+void pax_list_del_init(struct list_head *entry)
78698+{
78699+ pax_open_kernel();
78700+ __list_del(entry->prev, entry->next);
78701+ INIT_LIST_HEAD(entry);
78702+ pax_close_kernel();
78703+}
78704+EXPORT_SYMBOL(pax_list_del_init);
78705+
78706+void __pax_list_add_rcu(struct list_head *new,
78707+ struct list_head *prev, struct list_head *next)
78708+{
78709+#ifdef CONFIG_DEBUG_LIST
78710+ if (!__list_add_debug(new, prev, next))
78711+ return;
78712+#endif
78713+
78714+ pax_open_kernel();
78715+ new->next = next;
78716+ new->prev = prev;
78717+ rcu_assign_pointer(list_next_rcu(prev), new);
78718+ next->prev = new;
78719+ pax_close_kernel();
78720+}
78721+EXPORT_SYMBOL(__pax_list_add_rcu);
78722+
78723+void pax_list_del_rcu(struct list_head *entry)
78724+{
78725+#ifdef CONFIG_DEBUG_LIST
78726+ if (!__list_del_entry_debug(entry))
78727+ return;
78728+#endif
78729+
78730+ pax_open_kernel();
78731+ __list_del(entry->prev, entry->next);
78732+ entry->next = LIST_POISON1;
78733+ entry->prev = LIST_POISON2;
78734+ pax_close_kernel();
78735+}
78736+EXPORT_SYMBOL(pax_list_del_rcu);
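
The new pax_list_* family mirrors the ordinary list API but wraps every pointer update in pax_open_kernel()/pax_close_kernel(), for list heads and nodes that KERNEXEC/constify have made read-only; the debug checks are preserved by reusing the same __list_add_debug/__list_del_entry_debug helpers, which is also why lib/Makefile above now builds list_debug.o unconditionally. A hedged usage sketch, calling __pax_list_add directly since only that symbol is visible in this hunk (list.h-style wrappers such as pax_list_add presumably exist in the header parts of the patch):

static LIST_HEAD(protected_list);	/* imagine this sits in __read_only data */

static void example_register(struct list_head *node)
{
	/* list_add() semantics; the writes to node and its neighbours
	 * happen inside the open-kernel window */
	__pax_list_add(node, &protected_list, protected_list.next);
}
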
78737diff --git a/lib/radix-tree.c b/lib/radix-tree.c
78738index e796429..6e38f9f 100644
78739--- a/lib/radix-tree.c
78740+++ b/lib/radix-tree.c
78741@@ -92,7 +92,7 @@ struct radix_tree_preload {
78742 int nr;
78743 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
78744 };
78745-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
78746+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
78747
78748 static inline void *ptr_to_indirect(void *ptr)
78749 {
78750diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
78751index bb2b201..46abaf9 100644
78752--- a/lib/strncpy_from_user.c
78753+++ b/lib/strncpy_from_user.c
78754@@ -21,7 +21,7 @@
78755 */
78756 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
78757 {
78758- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
78759+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
78760 long res = 0;
78761
78762 /*
78763diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
78764index a28df52..3d55877 100644
78765--- a/lib/strnlen_user.c
78766+++ b/lib/strnlen_user.c
78767@@ -26,7 +26,7 @@
78768 */
78769 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
78770 {
78771- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
78772+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
78773 long align, res = 0;
78774 unsigned long c;
78775
78776diff --git a/lib/swiotlb.c b/lib/swiotlb.c
78777index 196b069..358f342 100644
78778--- a/lib/swiotlb.c
78779+++ b/lib/swiotlb.c
78780@@ -642,7 +642,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
78781
78782 void
78783 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
78784- dma_addr_t dev_addr)
78785+ dma_addr_t dev_addr, struct dma_attrs *attrs)
78786 {
78787 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
78788
78789diff --git a/lib/vsprintf.c b/lib/vsprintf.c
78790index fab33a9..3b5fe68 100644
78791--- a/lib/vsprintf.c
78792+++ b/lib/vsprintf.c
78793@@ -16,6 +16,9 @@
78794 * - scnprintf and vscnprintf
78795 */
78796
78797+#ifdef CONFIG_GRKERNSEC_HIDESYM
78798+#define __INCLUDED_BY_HIDESYM 1
78799+#endif
78800 #include <stdarg.h>
78801 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
78802 #include <linux/types.h>
78803@@ -541,7 +544,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
78804 char sym[KSYM_SYMBOL_LEN];
78805 if (ext == 'B')
78806 sprint_backtrace(sym, value);
78807- else if (ext != 'f' && ext != 's')
78808+ else if (ext != 'f' && ext != 's' && ext != 'a')
78809 sprint_symbol(sym, value);
78810 else
78811 sprint_symbol_no_offset(sym, value);
78812@@ -974,7 +977,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
78813 return number(buf, end, *(const netdev_features_t *)addr, spec);
78814 }
78815
78816+#ifdef CONFIG_GRKERNSEC_HIDESYM
78817+int kptr_restrict __read_mostly = 2;
78818+#else
78819 int kptr_restrict __read_mostly;
78820+#endif
78821
78822 /*
78823 * Show a '%p' thing. A kernel extension is that the '%p' is followed
78824@@ -988,6 +995,8 @@ int kptr_restrict __read_mostly;
78825 * - 'S' For symbolic direct pointers with offset
78826 * - 's' For symbolic direct pointers without offset
78827 * - 'B' For backtraced symbolic direct pointers with offset
78828+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
78829+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
78830 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
78831 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
78832 * - 'M' For a 6-byte MAC address, it prints the address in the
78833@@ -1043,12 +1052,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
78834
78835 if (!ptr && *fmt != 'K') {
78836 /*
78837- * Print (null) with the same width as a pointer so it makes
78838+ * Print (nil) with the same width as a pointer so it makes
78839 * tabular output look nice.
78840 */
78841 if (spec.field_width == -1)
78842 spec.field_width = default_width;
78843- return string(buf, end, "(null)", spec);
78844+ return string(buf, end, "(nil)", spec);
78845 }
78846
78847 switch (*fmt) {
78848@@ -1058,6 +1067,13 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
78849 /* Fallthrough */
78850 case 'S':
78851 case 's':
78852+#ifdef CONFIG_GRKERNSEC_HIDESYM
78853+ break;
78854+#else
78855+ return symbol_string(buf, end, ptr, spec, *fmt);
78856+#endif
78857+ case 'A':
78858+ case 'a':
78859 case 'B':
78860 return symbol_string(buf, end, ptr, spec, *fmt);
78861 case 'R':
78862@@ -1098,6 +1114,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
78863 va_end(va);
78864 return buf;
78865 }
78866+ case 'P':
78867+ break;
78868 case 'K':
78869 /*
78870 * %pK cannot be used in IRQ context because its test
78871@@ -1121,6 +1139,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
78872 }
78873 break;
78874 }
78875+
78876+#ifdef CONFIG_GRKERNSEC_HIDESYM
78877+ /* 'P' = approved pointers to copy to userland,
78878+ as in the /proc/kallsyms case, as we make it display nothing
78879+ for non-root users, and the real contents for root users
78880+ Also ignore 'K' pointers, since we force their NULLing for non-root users
78881+ above
78882+ */
78883+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
78884+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
78885+ dump_stack();
78886+ ptr = NULL;
78887+ }
78888+#endif
78889+
78890 spec.flags |= SMALL;
78891 if (spec.field_width == -1) {
78892 spec.field_width = default_width;
78893@@ -1842,11 +1875,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
78894 typeof(type) value; \
78895 if (sizeof(type) == 8) { \
78896 args = PTR_ALIGN(args, sizeof(u32)); \
78897- *(u32 *)&value = *(u32 *)args; \
78898- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
78899+ *(u32 *)&value = *(const u32 *)args; \
78900+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
78901 } else { \
78902 args = PTR_ALIGN(args, sizeof(type)); \
78903- value = *(typeof(type) *)args; \
78904+ value = *(const typeof(type) *)args; \
78905 } \
78906 args += sizeof(type); \
78907 value; \
78908@@ -1909,7 +1942,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
78909 case FORMAT_TYPE_STR: {
78910 const char *str_arg = args;
78911 args += strlen(str_arg) + 1;
78912- str = string(str, end, (char *)str_arg, spec);
78913+ str = string(str, end, str_arg, spec);
78914 break;
78915 }
78916
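The vsprintf changes give HIDESYM kernels a pair of opt-in specifiers: %pA/%pa print symbols like %pS/%ps but are whitelisted, and %pP marks a raw pointer as approved for exposure; anything else that looks like a kernel pointer heading for a user-visible buffer trips the infoleak detector appended to pointer(), which logs, dumps a stack, and NULLs the pointer. The kmemleak.c hunk further down in this patch uses both:

/* From mm/kmemleak.c later in this patch: approved pointer + symbol */
seq_printf(seq, "    [<%pP>] %pA\n", ptr, ptr);
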
78917diff --git a/localversion-grsec b/localversion-grsec
78918new file mode 100644
78919index 0000000..7cd6065
78920--- /dev/null
78921+++ b/localversion-grsec
78922@@ -0,0 +1 @@
78923+-grsec
78924diff --git a/mm/Kconfig b/mm/Kconfig
78925index 278e3ab..87c384d 100644
78926--- a/mm/Kconfig
78927+++ b/mm/Kconfig
78928@@ -286,10 +286,10 @@ config KSM
78929 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
78930
78931 config DEFAULT_MMAP_MIN_ADDR
78932- int "Low address space to protect from user allocation"
78933+ int "Low address space to protect from user allocation"
78934 depends on MMU
78935- default 4096
78936- help
78937+ default 65536
78938+ help
78939 This is the portion of low virtual memory which should be protected
78940 from userspace allocation. Keeping a user from writing to low pages
78941 can help reduce the impact of kernel NULL pointer bugs.
78942@@ -320,7 +320,7 @@ config MEMORY_FAILURE
78943
78944 config HWPOISON_INJECT
78945 tristate "HWPoison pages injector"
78946- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
78947+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
78948 select PROC_PAGE_MONITOR
78949
78950 config NOMMU_INITIAL_TRIM_EXCESS
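
Raising DEFAULT_MMAP_MIN_ADDR from one page to 64KiB widens the no-go zone that blunts NULL-dereference exploits, at the cost of breaking the rare program (dosemu, some vm86 users) that genuinely needs low mappings. A quick userspace demonstration of the effect, assuming the sysctl sits at the new default:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* With vm.mmap_min_addr = 65536 this is refused (EPERM for an
	 * unprivileged caller) instead of mapping page 0x1000. */
	void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

	if (p == MAP_FAILED)
		printf("mmap(0x1000): %s\n", strerror(errno));
	else
		printf("mapped at %p\n", p);
	return 0;
}
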
78951diff --git a/mm/filemap.c b/mm/filemap.c
78952index 83efee7..3f99381 100644
78953--- a/mm/filemap.c
78954+++ b/mm/filemap.c
78955@@ -1747,7 +1747,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
78956 struct address_space *mapping = file->f_mapping;
78957
78958 if (!mapping->a_ops->readpage)
78959- return -ENOEXEC;
78960+ return -ENODEV;
78961 file_accessed(file);
78962 vma->vm_ops = &generic_file_vm_ops;
78963 return 0;
78964@@ -2087,6 +2087,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
78965 *pos = i_size_read(inode);
78966
78967 if (limit != RLIM_INFINITY) {
78968+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
78969 if (*pos >= limit) {
78970 send_sig(SIGXFSZ, current, 0);
78971 return -EFBIG;
78972diff --git a/mm/fremap.c b/mm/fremap.c
78973index a0aaf0e..20325c3 100644
78974--- a/mm/fremap.c
78975+++ b/mm/fremap.c
78976@@ -157,6 +157,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
78977 retry:
78978 vma = find_vma(mm, start);
78979
78980+#ifdef CONFIG_PAX_SEGMEXEC
78981+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
78982+ goto out;
78983+#endif
78984+
78985 /*
78986 * Make sure the vma is shared, that it supports prefaulting,
78987 * and that the remapped range is valid and fully within
78988diff --git a/mm/highmem.c b/mm/highmem.c
78989index b32b70c..e512eb0 100644
78990--- a/mm/highmem.c
78991+++ b/mm/highmem.c
78992@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
78993 * So no dangers, even with speculative execution.
78994 */
78995 page = pte_page(pkmap_page_table[i]);
78996+ pax_open_kernel();
78997 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
78998-
78999+ pax_close_kernel();
79000 set_page_address(page, NULL);
79001 need_flush = 1;
79002 }
79003@@ -198,9 +199,11 @@ start:
79004 }
79005 }
79006 vaddr = PKMAP_ADDR(last_pkmap_nr);
79007+
79008+ pax_open_kernel();
79009 set_pte_at(&init_mm, vaddr,
79010 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
79011-
79012+ pax_close_kernel();
79013 pkmap_count[last_pkmap_nr] = 1;
79014 set_page_address(page, (void *)vaddr);
79015
79016diff --git a/mm/hugetlb.c b/mm/hugetlb.c
79017index 546db81..34830af 100644
79018--- a/mm/hugetlb.c
79019+++ b/mm/hugetlb.c
79020@@ -2008,15 +2008,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
79021 struct hstate *h = &default_hstate;
79022 unsigned long tmp;
79023 int ret;
79024+ ctl_table_no_const hugetlb_table;
79025
79026 tmp = h->max_huge_pages;
79027
79028 if (write && h->order >= MAX_ORDER)
79029 return -EINVAL;
79030
79031- table->data = &tmp;
79032- table->maxlen = sizeof(unsigned long);
79033- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
79034+ hugetlb_table = *table;
79035+ hugetlb_table.data = &tmp;
79036+ hugetlb_table.maxlen = sizeof(unsigned long);
79037+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
79038 if (ret)
79039 goto out;
79040
79041@@ -2073,15 +2075,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
79042 struct hstate *h = &default_hstate;
79043 unsigned long tmp;
79044 int ret;
79045+ ctl_table_no_const hugetlb_table;
79046
79047 tmp = h->nr_overcommit_huge_pages;
79048
79049 if (write && h->order >= MAX_ORDER)
79050 return -EINVAL;
79051
79052- table->data = &tmp;
79053- table->maxlen = sizeof(unsigned long);
79054- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
79055+ hugetlb_table = *table;
79056+ hugetlb_table.data = &tmp;
79057+ hugetlb_table.maxlen = sizeof(unsigned long);
79058+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
79059 if (ret)
79060 goto out;
79061
79062@@ -2511,6 +2515,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
79063 return 1;
79064 }
79065
79066+#ifdef CONFIG_PAX_SEGMEXEC
79067+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
79068+{
79069+ struct mm_struct *mm = vma->vm_mm;
79070+ struct vm_area_struct *vma_m;
79071+ unsigned long address_m;
79072+ pte_t *ptep_m;
79073+
79074+ vma_m = pax_find_mirror_vma(vma);
79075+ if (!vma_m)
79076+ return;
79077+
79078+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
79079+ address_m = address + SEGMEXEC_TASK_SIZE;
79080+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
79081+ get_page(page_m);
79082+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
79083+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
79084+}
79085+#endif
79086+
79087 /*
79088 * Hugetlb_cow() should be called with page lock of the original hugepage held.
79089 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
79090@@ -2629,6 +2654,11 @@ retry_avoidcopy:
79091 make_huge_pte(vma, new_page, 1));
79092 page_remove_rmap(old_page);
79093 hugepage_add_new_anon_rmap(new_page, vma, address);
79094+
79095+#ifdef CONFIG_PAX_SEGMEXEC
79096+ pax_mirror_huge_pte(vma, address, new_page);
79097+#endif
79098+
79099 /* Make the old page be freed below */
79100 new_page = old_page;
79101 }
79102@@ -2788,6 +2818,10 @@ retry:
79103 && (vma->vm_flags & VM_SHARED)));
79104 set_huge_pte_at(mm, address, ptep, new_pte);
79105
79106+#ifdef CONFIG_PAX_SEGMEXEC
79107+ pax_mirror_huge_pte(vma, address, page);
79108+#endif
79109+
79110 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
79111 /* Optimization, do the COW without a second fault */
79112 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
79113@@ -2817,6 +2851,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
79114 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
79115 struct hstate *h = hstate_vma(vma);
79116
79117+#ifdef CONFIG_PAX_SEGMEXEC
79118+ struct vm_area_struct *vma_m;
79119+#endif
79120+
79121 address &= huge_page_mask(h);
79122
79123 ptep = huge_pte_offset(mm, address);
79124@@ -2830,6 +2868,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
79125 VM_FAULT_SET_HINDEX(hstate_index(h));
79126 }
79127
79128+#ifdef CONFIG_PAX_SEGMEXEC
79129+ vma_m = pax_find_mirror_vma(vma);
79130+ if (vma_m) {
79131+ unsigned long address_m;
79132+
79133+ if (vma->vm_start > vma_m->vm_start) {
79134+ address_m = address;
79135+ address -= SEGMEXEC_TASK_SIZE;
79136+ vma = vma_m;
79137+ h = hstate_vma(vma);
79138+ } else
79139+ address_m = address + SEGMEXEC_TASK_SIZE;
79140+
79141+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
79142+ return VM_FAULT_OOM;
79143+ address_m &= HPAGE_MASK;
79144+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
79145+ }
79146+#endif
79147+
79148 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
79149 if (!ptep)
79150 return VM_FAULT_OOM;
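
pax_mirror_huge_pte above is the hugepage arm of PaX SEGMEXEC mirroring, which the mm/memory.c hunks later in this patch implement for ordinary pages: under SEGMEXEC the user address space is split in half, the lower half holding the data view and the upper half an executable mirror, so every fault that instantiates a PTE in a mirrored VMA must also instantiate the twin at address + SEGMEXEC_TASK_SIZE (taking the second page-table lock with SINGLE_DEPTH_NESTING when the two PTEs hash to different locks). In sketch form, the address relationship the helpers rely on — the exact constant is from memory and may differ:

/* Illustrative only; on i386 SEGMEXEC halves the usual 3GB layout. */
#define SEGMEXEC_TASK_SIZE	(TASK_SIZE / 2)

static unsigned long mirror_of(unsigned long addr)
{
	BUG_ON(addr >= SEGMEXEC_TASK_SIZE);
	return addr + SEGMEXEC_TASK_SIZE;	/* the executable twin */
}
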
79151diff --git a/mm/internal.h b/mm/internal.h
79152index 9ba2110..eaf0674 100644
79153--- a/mm/internal.h
79154+++ b/mm/internal.h
79155@@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
79156 * in mm/page_alloc.c
79157 */
79158 extern void __free_pages_bootmem(struct page *page, unsigned int order);
79159+extern void free_compound_page(struct page *page);
79160 extern void prep_compound_page(struct page *page, unsigned long order);
79161 #ifdef CONFIG_MEMORY_FAILURE
79162 extern bool is_free_buddy_page(struct page *page);
79163diff --git a/mm/kmemleak.c b/mm/kmemleak.c
79164index 752a705..6c3102e 100644
79165--- a/mm/kmemleak.c
79166+++ b/mm/kmemleak.c
79167@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
79168
79169 for (i = 0; i < object->trace_len; i++) {
79170 void *ptr = (void *)object->trace[i];
79171- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
79172+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
79173 }
79174 }
79175
79176@@ -1853,7 +1853,7 @@ static int __init kmemleak_late_init(void)
79177 return -ENOMEM;
79178 }
79179
79180- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
79181+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
79182 &kmemleak_fops);
79183 if (!dentry)
79184 pr_warning("Failed to create the debugfs kmemleak file\n");
79185diff --git a/mm/maccess.c b/mm/maccess.c
79186index d53adf9..03a24bf 100644
79187--- a/mm/maccess.c
79188+++ b/mm/maccess.c
79189@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
79190 set_fs(KERNEL_DS);
79191 pagefault_disable();
79192 ret = __copy_from_user_inatomic(dst,
79193- (__force const void __user *)src, size);
79194+ (const void __force_user *)src, size);
79195 pagefault_enable();
79196 set_fs(old_fs);
79197
79198@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
79199
79200 set_fs(KERNEL_DS);
79201 pagefault_disable();
79202- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
79203+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
79204 pagefault_enable();
79205 set_fs(old_fs);
79206
79207diff --git a/mm/madvise.c b/mm/madvise.c
79208index 03dfa5c..b032917 100644
79209--- a/mm/madvise.c
79210+++ b/mm/madvise.c
79211@@ -48,6 +48,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
79212 pgoff_t pgoff;
79213 unsigned long new_flags = vma->vm_flags;
79214
79215+#ifdef CONFIG_PAX_SEGMEXEC
79216+ struct vm_area_struct *vma_m;
79217+#endif
79218+
79219 switch (behavior) {
79220 case MADV_NORMAL:
79221 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
79222@@ -123,6 +127,13 @@ success:
79223 /*
79224 * vm_flags is protected by the mmap_sem held in write mode.
79225 */
79226+
79227+#ifdef CONFIG_PAX_SEGMEXEC
79228+ vma_m = pax_find_mirror_vma(vma);
79229+ if (vma_m)
79230+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
79231+#endif
79232+
79233 vma->vm_flags = new_flags;
79234
79235 out:
79236@@ -181,6 +192,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
79237 struct vm_area_struct ** prev,
79238 unsigned long start, unsigned long end)
79239 {
79240+
79241+#ifdef CONFIG_PAX_SEGMEXEC
79242+ struct vm_area_struct *vma_m;
79243+#endif
79244+
79245 *prev = vma;
79246 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
79247 return -EINVAL;
79248@@ -193,6 +209,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
79249 zap_page_range(vma, start, end - start, &details);
79250 } else
79251 zap_page_range(vma, start, end - start, NULL);
79252+
79253+#ifdef CONFIG_PAX_SEGMEXEC
79254+ vma_m = pax_find_mirror_vma(vma);
79255+ if (vma_m) {
79256+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
79257+ struct zap_details details = {
79258+ .nonlinear_vma = vma_m,
79259+ .last_index = ULONG_MAX,
79260+ };
79261+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
79262+ } else
79263+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
79264+ }
79265+#endif
79266+
79267 return 0;
79268 }
79269
79270@@ -397,6 +428,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
79271 if (end < start)
79272 goto out;
79273
79274+#ifdef CONFIG_PAX_SEGMEXEC
79275+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
79276+ if (end > SEGMEXEC_TASK_SIZE)
79277+ goto out;
79278+ } else
79279+#endif
79280+
79281+ if (end > TASK_SIZE)
79282+ goto out;
79283+
79284 error = 0;
79285 if (end == start)
79286 goto out;
79287diff --git a/mm/memory-failure.c b/mm/memory-failure.c
79288index c6e4dd3..1f41988 100644
79289--- a/mm/memory-failure.c
79290+++ b/mm/memory-failure.c
79291@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
79292
79293 int sysctl_memory_failure_recovery __read_mostly = 1;
79294
79295-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
79296+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
79297
79298 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
79299
79300@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
79301 pfn, t->comm, t->pid);
79302 si.si_signo = SIGBUS;
79303 si.si_errno = 0;
79304- si.si_addr = (void *)addr;
79305+ si.si_addr = (void __user *)addr;
79306 #ifdef __ARCH_SI_TRAPNO
79307 si.si_trapno = trapno;
79308 #endif
79309@@ -760,7 +760,7 @@ static struct page_state {
79310 unsigned long res;
79311 char *msg;
79312 int (*action)(struct page *p, unsigned long pfn);
79313-} error_states[] = {
79314+} __do_const error_states[] = {
79315 { reserved, reserved, "reserved kernel", me_kernel },
79316 /*
79317 * free pages are specially detected outside this table:
79318@@ -1040,7 +1040,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
79319 }
79320
79321 nr_pages = 1 << compound_trans_order(hpage);
79322- atomic_long_add(nr_pages, &mce_bad_pages);
79323+ atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
79324
79325 /*
79326 * We need/can do nothing about count=0 pages.
79327@@ -1070,7 +1070,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
79328 if (!PageHWPoison(hpage)
79329 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
79330 || (p != hpage && TestSetPageHWPoison(hpage))) {
79331- atomic_long_sub(nr_pages, &mce_bad_pages);
79332+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
79333 return 0;
79334 }
79335 set_page_hwpoison_huge_page(hpage);
79336@@ -1128,7 +1128,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
79337 }
79338 if (hwpoison_filter(p)) {
79339 if (TestClearPageHWPoison(p))
79340- atomic_long_sub(nr_pages, &mce_bad_pages);
79341+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
79342 unlock_page(hpage);
79343 put_page(hpage);
79344 return 0;
79345@@ -1323,7 +1323,7 @@ int unpoison_memory(unsigned long pfn)
79346 return 0;
79347 }
79348 if (TestClearPageHWPoison(p))
79349- atomic_long_sub(nr_pages, &mce_bad_pages);
79350+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
79351 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
79352 return 0;
79353 }
79354@@ -1337,7 +1337,7 @@ int unpoison_memory(unsigned long pfn)
79355 */
79356 if (TestClearPageHWPoison(page)) {
79357 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
79358- atomic_long_sub(nr_pages, &mce_bad_pages);
79359+ atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
79360 freeit = 1;
79361 if (PageHuge(page))
79362 clear_page_hwpoison_huge_page(page);
79363@@ -1442,7 +1442,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
79364 }
79365 done:
79366 if (!PageHWPoison(hpage))
79367- atomic_long_add(1 << compound_trans_order(hpage),
79368+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
79369 &mce_bad_pages);
79370 set_page_hwpoison_huge_page(hpage);
79371 dequeue_hwpoisoned_huge_page(hpage);
79372@@ -1583,7 +1583,7 @@ int soft_offline_page(struct page *page, int flags)
79373 return ret;
79374
79375 done:
79376- atomic_long_add(1, &mce_bad_pages);
79377+ atomic_long_add_unchecked(1, &mce_bad_pages);
79378 SetPageHWPoison(page);
79379 /* keep elevated page count for bad page */
79380 return ret;
79381diff --git a/mm/memory.c b/mm/memory.c
79382index bb1369f..efb96b5 100644
79383--- a/mm/memory.c
79384+++ b/mm/memory.c
79385@@ -433,6 +433,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
79386 free_pte_range(tlb, pmd, addr);
79387 } while (pmd++, addr = next, addr != end);
79388
79389+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
79390 start &= PUD_MASK;
79391 if (start < floor)
79392 return;
79393@@ -447,6 +448,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
79394 pmd = pmd_offset(pud, start);
79395 pud_clear(pud);
79396 pmd_free_tlb(tlb, pmd, start);
79397+#endif
79398+
79399 }
79400
79401 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
79402@@ -466,6 +469,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
79403 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
79404 } while (pud++, addr = next, addr != end);
79405
79406+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
79407 start &= PGDIR_MASK;
79408 if (start < floor)
79409 return;
79410@@ -480,6 +484,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
79411 pud = pud_offset(pgd, start);
79412 pgd_clear(pgd);
79413 pud_free_tlb(tlb, pud, start);
79414+#endif
79415+
79416 }
79417
79418 /*
79419@@ -1618,12 +1624,6 @@ no_page_table:
79420 return page;
79421 }
79422
79423-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
79424-{
79425- return stack_guard_page_start(vma, addr) ||
79426- stack_guard_page_end(vma, addr+PAGE_SIZE);
79427-}
79428-
79429 /**
79430 * __get_user_pages() - pin user pages in memory
79431 * @tsk: task_struct of target task
79432@@ -1709,10 +1709,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
79433
79434 i = 0;
79435
79436- do {
79437+ while (nr_pages) {
79438 struct vm_area_struct *vma;
79439
79440- vma = find_extend_vma(mm, start);
79441+ vma = find_vma(mm, start);
79442 if (!vma && in_gate_area(mm, start)) {
79443 unsigned long pg = start & PAGE_MASK;
79444 pgd_t *pgd;
79445@@ -1760,7 +1760,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
79446 goto next_page;
79447 }
79448
79449- if (!vma ||
79450+ if (!vma || start < vma->vm_start ||
79451 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
79452 !(vm_flags & vma->vm_flags))
79453 return i ? : -EFAULT;
79454@@ -1787,11 +1787,6 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
79455 int ret;
79456 unsigned int fault_flags = 0;
79457
79458- /* For mlock, just skip the stack guard page. */
79459- if (foll_flags & FOLL_MLOCK) {
79460- if (stack_guard_page(vma, start))
79461- goto next_page;
79462- }
79463 if (foll_flags & FOLL_WRITE)
79464 fault_flags |= FAULT_FLAG_WRITE;
79465 if (nonblocking)
79466@@ -1865,7 +1860,7 @@ next_page:
79467 start += PAGE_SIZE;
79468 nr_pages--;
79469 } while (nr_pages && start < vma->vm_end);
79470- } while (nr_pages);
79471+ }
79472 return i;
79473 }
79474 EXPORT_SYMBOL(__get_user_pages);
79475@@ -2072,6 +2067,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
79476 page_add_file_rmap(page);
79477 set_pte_at(mm, addr, pte, mk_pte(page, prot));
79478
79479+#ifdef CONFIG_PAX_SEGMEXEC
79480+ pax_mirror_file_pte(vma, addr, page, ptl);
79481+#endif
79482+
79483 retval = 0;
79484 pte_unmap_unlock(pte, ptl);
79485 return retval;
79486@@ -2116,9 +2115,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
79487 if (!page_count(page))
79488 return -EINVAL;
79489 if (!(vma->vm_flags & VM_MIXEDMAP)) {
79490+
79491+#ifdef CONFIG_PAX_SEGMEXEC
79492+ struct vm_area_struct *vma_m;
79493+#endif
79494+
79495 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
79496 BUG_ON(vma->vm_flags & VM_PFNMAP);
79497 vma->vm_flags |= VM_MIXEDMAP;
79498+
79499+#ifdef CONFIG_PAX_SEGMEXEC
79500+ vma_m = pax_find_mirror_vma(vma);
79501+ if (vma_m)
79502+ vma_m->vm_flags |= VM_MIXEDMAP;
79503+#endif
79504+
79505 }
79506 return insert_page(vma, addr, page, vma->vm_page_prot);
79507 }
79508@@ -2201,6 +2212,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
79509 unsigned long pfn)
79510 {
79511 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
79512+ BUG_ON(vma->vm_mirror);
79513
79514 if (addr < vma->vm_start || addr >= vma->vm_end)
79515 return -EFAULT;
79516@@ -2401,7 +2413,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
79517
79518 BUG_ON(pud_huge(*pud));
79519
79520- pmd = pmd_alloc(mm, pud, addr);
79521+ pmd = (mm == &init_mm) ?
79522+ pmd_alloc_kernel(mm, pud, addr) :
79523+ pmd_alloc(mm, pud, addr);
79524 if (!pmd)
79525 return -ENOMEM;
79526 do {
79527@@ -2421,7 +2435,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
79528 unsigned long next;
79529 int err;
79530
79531- pud = pud_alloc(mm, pgd, addr);
79532+ pud = (mm == &init_mm) ?
79533+ pud_alloc_kernel(mm, pgd, addr) :
79534+ pud_alloc(mm, pgd, addr);
79535 if (!pud)
79536 return -ENOMEM;
79537 do {
79538@@ -2509,6 +2525,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
79539 copy_user_highpage(dst, src, va, vma);
79540 }
79541
79542+#ifdef CONFIG_PAX_SEGMEXEC
79543+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
79544+{
79545+ struct mm_struct *mm = vma->vm_mm;
79546+ spinlock_t *ptl;
79547+ pte_t *pte, entry;
79548+
79549+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
79550+ entry = *pte;
79551+ if (!pte_present(entry)) {
79552+ if (!pte_none(entry)) {
79553+ BUG_ON(pte_file(entry));
79554+ free_swap_and_cache(pte_to_swp_entry(entry));
79555+ pte_clear_not_present_full(mm, address, pte, 0);
79556+ }
79557+ } else {
79558+ struct page *page;
79559+
79560+ flush_cache_page(vma, address, pte_pfn(entry));
79561+ entry = ptep_clear_flush(vma, address, pte);
79562+ BUG_ON(pte_dirty(entry));
79563+ page = vm_normal_page(vma, address, entry);
79564+ if (page) {
79565+ update_hiwater_rss(mm);
79566+ if (PageAnon(page))
79567+ dec_mm_counter_fast(mm, MM_ANONPAGES);
79568+ else
79569+ dec_mm_counter_fast(mm, MM_FILEPAGES);
79570+ page_remove_rmap(page);
79571+ page_cache_release(page);
79572+ }
79573+ }
79574+ pte_unmap_unlock(pte, ptl);
79575+}
79576+
79577+/* PaX: if vma is mirrored, synchronize the mirror's PTE
79578+ *
79579+ * the ptl of the lower mapped page is held on entry and is not released on exit
79580+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
79581+ */
79582+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
79583+{
79584+ struct mm_struct *mm = vma->vm_mm;
79585+ unsigned long address_m;
79586+ spinlock_t *ptl_m;
79587+ struct vm_area_struct *vma_m;
79588+ pmd_t *pmd_m;
79589+ pte_t *pte_m, entry_m;
79590+
79591+ BUG_ON(!page_m || !PageAnon(page_m));
79592+
79593+ vma_m = pax_find_mirror_vma(vma);
79594+ if (!vma_m)
79595+ return;
79596+
79597+ BUG_ON(!PageLocked(page_m));
79598+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
79599+ address_m = address + SEGMEXEC_TASK_SIZE;
79600+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
79601+ pte_m = pte_offset_map(pmd_m, address_m);
79602+ ptl_m = pte_lockptr(mm, pmd_m);
79603+ if (ptl != ptl_m) {
79604+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
79605+ if (!pte_none(*pte_m))
79606+ goto out;
79607+ }
79608+
79609+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
79610+ page_cache_get(page_m);
79611+ page_add_anon_rmap(page_m, vma_m, address_m);
79612+ inc_mm_counter_fast(mm, MM_ANONPAGES);
79613+ set_pte_at(mm, address_m, pte_m, entry_m);
79614+ update_mmu_cache(vma_m, address_m, entry_m);
79615+out:
79616+ if (ptl != ptl_m)
79617+ spin_unlock(ptl_m);
79618+ pte_unmap(pte_m);
79619+ unlock_page(page_m);
79620+}
79621+
79622+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
79623+{
79624+ struct mm_struct *mm = vma->vm_mm;
79625+ unsigned long address_m;
79626+ spinlock_t *ptl_m;
79627+ struct vm_area_struct *vma_m;
79628+ pmd_t *pmd_m;
79629+ pte_t *pte_m, entry_m;
79630+
79631+ BUG_ON(!page_m || PageAnon(page_m));
79632+
79633+ vma_m = pax_find_mirror_vma(vma);
79634+ if (!vma_m)
79635+ return;
79636+
79637+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
79638+ address_m = address + SEGMEXEC_TASK_SIZE;
79639+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
79640+ pte_m = pte_offset_map(pmd_m, address_m);
79641+ ptl_m = pte_lockptr(mm, pmd_m);
79642+ if (ptl != ptl_m) {
79643+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
79644+ if (!pte_none(*pte_m))
79645+ goto out;
79646+ }
79647+
79648+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
79649+ page_cache_get(page_m);
79650+ page_add_file_rmap(page_m);
79651+ inc_mm_counter_fast(mm, MM_FILEPAGES);
79652+ set_pte_at(mm, address_m, pte_m, entry_m);
79653+ update_mmu_cache(vma_m, address_m, entry_m);
79654+out:
79655+ if (ptl != ptl_m)
79656+ spin_unlock(ptl_m);
79657+ pte_unmap(pte_m);
79658+}
79659+
79660+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
79661+{
79662+ struct mm_struct *mm = vma->vm_mm;
79663+ unsigned long address_m;
79664+ spinlock_t *ptl_m;
79665+ struct vm_area_struct *vma_m;
79666+ pmd_t *pmd_m;
79667+ pte_t *pte_m, entry_m;
79668+
79669+ vma_m = pax_find_mirror_vma(vma);
79670+ if (!vma_m)
79671+ return;
79672+
79673+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
79674+ address_m = address + SEGMEXEC_TASK_SIZE;
79675+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
79676+ pte_m = pte_offset_map(pmd_m, address_m);
79677+ ptl_m = pte_lockptr(mm, pmd_m);
79678+ if (ptl != ptl_m) {
79679+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
79680+ if (!pte_none(*pte_m))
79681+ goto out;
79682+ }
79683+
79684+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
79685+ set_pte_at(mm, address_m, pte_m, entry_m);
79686+out:
79687+ if (ptl != ptl_m)
79688+ spin_unlock(ptl_m);
79689+ pte_unmap(pte_m);
79690+}
79691+
79692+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
79693+{
79694+ struct page *page_m;
79695+ pte_t entry;
79696+
79697+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
79698+ goto out;
79699+
79700+ entry = *pte;
79701+ page_m = vm_normal_page(vma, address, entry);
79702+ if (!page_m)
79703+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
79704+ else if (PageAnon(page_m)) {
79705+ if (pax_find_mirror_vma(vma)) {
79706+ pte_unmap_unlock(pte, ptl);
79707+ lock_page(page_m);
79708+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
79709+ if (pte_same(entry, *pte))
79710+ pax_mirror_anon_pte(vma, address, page_m, ptl);
79711+ else
79712+ unlock_page(page_m);
79713+ }
79714+ } else
79715+ pax_mirror_file_pte(vma, address, page_m, ptl);
79716+
79717+out:
79718+ pte_unmap_unlock(pte, ptl);
79719+}
79720+#endif
79721+
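The helpers above keep the two halves of a SEGMEXEC address space in sync: every executable page mapped at some address in the lower (data) half gets a non-writable twin exactly SEGMEXEC_TASK_SIZE higher in the upper (code) half. A minimal userspace sketch of that address arithmetic, assuming an illustrative 1 GB split (the real i386 value is 1.5 GB and comes from the arch headers):

#include <assert.h>
#include <stdio.h>

#define SEGMEXEC_TASK_SIZE (1UL << 30)	/* illustrative placeholder */

/* mirror_of() models the address_m computation used by the
 * pax_mirror_*_pte() helpers: lower-half addresses only. */
static unsigned long mirror_of(unsigned long address)
{
	assert(address < SEGMEXEC_TASK_SIZE);
	return address + SEGMEXEC_TASK_SIZE;
}

int main(void)
{
	printf("%#lx\n", mirror_of(0x08048000UL));	/* 0x48048000 */
	return 0;
}

Note also how each helper compares ptl with ptl_m before locking the mirror's pte lock: with split page-table locks the two addresses can hash to the same spinlock, which the caller already holds, and taking it again would deadlock.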
79722 /*
79723 * This routine handles present pages, when users try to write
79724 * to a shared page. It is done by copying the page to a new address
79725@@ -2725,6 +2921,12 @@ gotten:
79726 */
79727 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
79728 if (likely(pte_same(*page_table, orig_pte))) {
79729+
79730+#ifdef CONFIG_PAX_SEGMEXEC
79731+ if (pax_find_mirror_vma(vma))
79732+ BUG_ON(!trylock_page(new_page));
79733+#endif
79734+
79735 if (old_page) {
79736 if (!PageAnon(old_page)) {
79737 dec_mm_counter_fast(mm, MM_FILEPAGES);
79738@@ -2776,6 +2978,10 @@ gotten:
79739 page_remove_rmap(old_page);
79740 }
79741
79742+#ifdef CONFIG_PAX_SEGMEXEC
79743+ pax_mirror_anon_pte(vma, address, new_page, ptl);
79744+#endif
79745+
79746 /* Free the old page.. */
79747 new_page = old_page;
79748 ret |= VM_FAULT_WRITE;
79749@@ -3051,6 +3257,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
79750 swap_free(entry);
79751 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
79752 try_to_free_swap(page);
79753+
79754+#ifdef CONFIG_PAX_SEGMEXEC
79755+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
79756+#endif
79757+
79758 unlock_page(page);
79759 if (swapcache) {
79760 /*
79761@@ -3074,6 +3285,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
79762
79763 /* No need to invalidate - it was non-present before */
79764 update_mmu_cache(vma, address, page_table);
79765+
79766+#ifdef CONFIG_PAX_SEGMEXEC
79767+ pax_mirror_anon_pte(vma, address, page, ptl);
79768+#endif
79769+
79770 unlock:
79771 pte_unmap_unlock(page_table, ptl);
79772 out:
79773@@ -3093,40 +3309,6 @@ out_release:
79774 }
79775
79776 /*
79777- * This is like a special single-page "expand_{down|up}wards()",
79778- * except we must first make sure that 'address{-|+}PAGE_SIZE'
79779- * doesn't hit another vma.
79780- */
79781-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
79782-{
79783- address &= PAGE_MASK;
79784- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
79785- struct vm_area_struct *prev = vma->vm_prev;
79786-
79787- /*
79788- * Is there a mapping abutting this one below?
79789- *
79790- * That's only ok if it's the same stack mapping
79791- * that has gotten split..
79792- */
79793- if (prev && prev->vm_end == address)
79794- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
79795-
79796- expand_downwards(vma, address - PAGE_SIZE);
79797- }
79798- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
79799- struct vm_area_struct *next = vma->vm_next;
79800-
79801- /* As VM_GROWSDOWN but s/below/above/ */
79802- if (next && next->vm_start == address + PAGE_SIZE)
79803- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
79804-
79805- expand_upwards(vma, address + PAGE_SIZE);
79806- }
79807- return 0;
79808-}
79809-
79810-/*
79811 * We enter with non-exclusive mmap_sem (to exclude vma changes,
79812 * but allow concurrent faults), and pte mapped but not yet locked.
79813 * We return with mmap_sem still held, but pte unmapped and unlocked.
79814@@ -3135,27 +3317,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
79815 unsigned long address, pte_t *page_table, pmd_t *pmd,
79816 unsigned int flags)
79817 {
79818- struct page *page;
79819+ struct page *page = NULL;
79820 spinlock_t *ptl;
79821 pte_t entry;
79822
79823- pte_unmap(page_table);
79824-
79825- /* Check if we need to add a guard page to the stack */
79826- if (check_stack_guard_page(vma, address) < 0)
79827- return VM_FAULT_SIGBUS;
79828-
79829- /* Use the zero-page for reads */
79830 if (!(flags & FAULT_FLAG_WRITE)) {
79831 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
79832 vma->vm_page_prot));
79833- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
79834+ ptl = pte_lockptr(mm, pmd);
79835+ spin_lock(ptl);
79836 if (!pte_none(*page_table))
79837 goto unlock;
79838 goto setpte;
79839 }
79840
79841 /* Allocate our own private page. */
79842+ pte_unmap(page_table);
79843+
79844 if (unlikely(anon_vma_prepare(vma)))
79845 goto oom;
79846 page = alloc_zeroed_user_highpage_movable(vma, address);
79847@@ -3174,6 +3352,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
79848 if (!pte_none(*page_table))
79849 goto release;
79850
79851+#ifdef CONFIG_PAX_SEGMEXEC
79852+ if (pax_find_mirror_vma(vma))
79853+ BUG_ON(!trylock_page(page));
79854+#endif
79855+
79856 inc_mm_counter_fast(mm, MM_ANONPAGES);
79857 page_add_new_anon_rmap(page, vma, address);
79858 setpte:
79859@@ -3181,6 +3364,12 @@ setpte:
79860
79861 /* No need to invalidate - it was non-present before */
79862 update_mmu_cache(vma, address, page_table);
79863+
79864+#ifdef CONFIG_PAX_SEGMEXEC
79865+ if (page)
79866+ pax_mirror_anon_pte(vma, address, page, ptl);
79867+#endif
79868+
79869 unlock:
79870 pte_unmap_unlock(page_table, ptl);
79871 return 0;
79872@@ -3324,6 +3513,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
79873 */
79874 /* Only go through if we didn't race with anybody else... */
79875 if (likely(pte_same(*page_table, orig_pte))) {
79876+
79877+#ifdef CONFIG_PAX_SEGMEXEC
79878+ if (anon && pax_find_mirror_vma(vma))
79879+ BUG_ON(!trylock_page(page));
79880+#endif
79881+
79882 flush_icache_page(vma, page);
79883 entry = mk_pte(page, vma->vm_page_prot);
79884 if (flags & FAULT_FLAG_WRITE)
79885@@ -3343,6 +3538,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
79886
79887 /* no need to invalidate: a not-present page won't be cached */
79888 update_mmu_cache(vma, address, page_table);
79889+
79890+#ifdef CONFIG_PAX_SEGMEXEC
79891+ if (anon)
79892+ pax_mirror_anon_pte(vma, address, page, ptl);
79893+ else
79894+ pax_mirror_file_pte(vma, address, page, ptl);
79895+#endif
79896+
79897 } else {
79898 if (cow_page)
79899 mem_cgroup_uncharge_page(cow_page);
79900@@ -3664,6 +3867,12 @@ int handle_pte_fault(struct mm_struct *mm,
79901 if (flags & FAULT_FLAG_WRITE)
79902 flush_tlb_fix_spurious_fault(vma, address);
79903 }
79904+
79905+#ifdef CONFIG_PAX_SEGMEXEC
79906+ pax_mirror_pte(vma, address, pte, pmd, ptl);
79907+ return 0;
79908+#endif
79909+
79910 unlock:
79911 pte_unmap_unlock(pte, ptl);
79912 return 0;
79913@@ -3680,6 +3889,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
79914 pmd_t *pmd;
79915 pte_t *pte;
79916
79917+#ifdef CONFIG_PAX_SEGMEXEC
79918+ struct vm_area_struct *vma_m;
79919+#endif
79920+
79921 __set_current_state(TASK_RUNNING);
79922
79923 count_vm_event(PGFAULT);
79924@@ -3691,6 +3904,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
79925 if (unlikely(is_vm_hugetlb_page(vma)))
79926 return hugetlb_fault(mm, vma, address, flags);
79927
79928+#ifdef CONFIG_PAX_SEGMEXEC
79929+ vma_m = pax_find_mirror_vma(vma);
79930+ if (vma_m) {
79931+ unsigned long address_m;
79932+ pgd_t *pgd_m;
79933+ pud_t *pud_m;
79934+ pmd_t *pmd_m;
79935+
79936+ if (vma->vm_start > vma_m->vm_start) {
79937+ address_m = address;
79938+ address -= SEGMEXEC_TASK_SIZE;
79939+ vma = vma_m;
79940+ } else
79941+ address_m = address + SEGMEXEC_TASK_SIZE;
79942+
79943+ pgd_m = pgd_offset(mm, address_m);
79944+ pud_m = pud_alloc(mm, pgd_m, address_m);
79945+ if (!pud_m)
79946+ return VM_FAULT_OOM;
79947+ pmd_m = pmd_alloc(mm, pud_m, address_m);
79948+ if (!pmd_m)
79949+ return VM_FAULT_OOM;
79950+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
79951+ return VM_FAULT_OOM;
79952+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
79953+ }
79954+#endif
79955+
79956 retry:
79957 pgd = pgd_offset(mm, address);
79958 pud = pud_alloc(mm, pgd, address);
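The hunk above makes every fault in a SEGMEXEC task operate on the lower mapping: if the fault hit the upper half, address is shifted down by SEGMEXEC_TASK_SIZE and vma is swapped for its mirror, while the mirror-side page tables are allocated up front so the later pax_mirror_pte() call cannot fail for lack of memory. A sketch of just the normalization, with faulted_in_upper standing in for the vma->vm_start > vma_m->vm_start test (names here are hypothetical):

#define SEGMEXEC_TASK_SIZE (1UL << 30)	/* illustrative, as above */

/* Returns the address to fault on; *address_m receives the mirror. */
static unsigned long normalize_fault(unsigned long address,
				     int faulted_in_upper,
				     unsigned long *address_m)
{
	if (faulted_in_upper) {
		*address_m = address;
		return address - SEGMEXEC_TASK_SIZE;
	}
	*address_m = address + SEGMEXEC_TASK_SIZE;
	return address;
}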
79959@@ -3789,6 +4030,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
79960 spin_unlock(&mm->page_table_lock);
79961 return 0;
79962 }
79963+
79964+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
79965+{
79966+ pud_t *new = pud_alloc_one(mm, address);
79967+ if (!new)
79968+ return -ENOMEM;
79969+
79970+ smp_wmb(); /* See comment in __pte_alloc */
79971+
79972+ spin_lock(&mm->page_table_lock);
79973+ if (pgd_present(*pgd)) /* Another has populated it */
79974+ pud_free(mm, new);
79975+ else
79976+ pgd_populate_kernel(mm, pgd, new);
79977+ spin_unlock(&mm->page_table_lock);
79978+ return 0;
79979+}
79980 #endif /* __PAGETABLE_PUD_FOLDED */
79981
79982 #ifndef __PAGETABLE_PMD_FOLDED
79983@@ -3819,6 +4077,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
79984 spin_unlock(&mm->page_table_lock);
79985 return 0;
79986 }
79987+
79988+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
79989+{
79990+ pmd_t *new = pmd_alloc_one(mm, address);
79991+ if (!new)
79992+ return -ENOMEM;
79993+
79994+ smp_wmb(); /* See comment in __pte_alloc */
79995+
79996+ spin_lock(&mm->page_table_lock);
79997+#ifndef __ARCH_HAS_4LEVEL_HACK
79998+ if (pud_present(*pud)) /* Another has populated it */
79999+ pmd_free(mm, new);
80000+ else
80001+ pud_populate_kernel(mm, pud, new);
80002+#else
80003+ if (pgd_present(*pud)) /* Another has populated it */
80004+ pmd_free(mm, new);
80005+ else
80006+ pgd_populate_kernel(mm, pud, new);
80007+#endif /* __ARCH_HAS_4LEVEL_HACK */
80008+ spin_unlock(&mm->page_table_lock);
80009+ return 0;
80010+}
80011 #endif /* __PAGETABLE_PMD_FOLDED */
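Both _kernel variants follow the same optimistic-allocation pattern as their user counterparts: allocate the new table outside the lock, publish it under page_table_lock, and free it if another thread won the race; the smp_wmb() guarantees the zeroed table contents are visible before the upper-level entry that points at them. The same shape in portable C with pthreads, as a rough analogue only:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int *table;

static int install_table(void)
{
	int *new = calloc(1, sizeof(*new));	/* allocate outside the lock */
	if (!new)
		return -1;
	/* the kernel issues smp_wmb() here; the mutex's acquire/release
	 * semantics cover the ordering in this sketch */
	pthread_mutex_lock(&lock);
	if (table)		/* another thread has populated it */
		free(new);
	else
		table = new;
	pthread_mutex_unlock(&lock);
	return 0;
}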
80012
80013 int make_pages_present(unsigned long addr, unsigned long end)
80014@@ -3856,7 +4138,7 @@ static int __init gate_vma_init(void)
80015 gate_vma.vm_start = FIXADDR_USER_START;
80016 gate_vma.vm_end = FIXADDR_USER_END;
80017 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
80018- gate_vma.vm_page_prot = __P101;
80019+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
80020
80021 return 0;
80022 }
80023diff --git a/mm/mempolicy.c b/mm/mempolicy.c
80024index e2df1c1..1e31d57 100644
80025--- a/mm/mempolicy.c
80026+++ b/mm/mempolicy.c
80027@@ -721,6 +721,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
80028 unsigned long vmstart;
80029 unsigned long vmend;
80030
80031+#ifdef CONFIG_PAX_SEGMEXEC
80032+ struct vm_area_struct *vma_m;
80033+#endif
80034+
80035 vma = find_vma(mm, start);
80036 if (!vma || vma->vm_start > start)
80037 return -EFAULT;
80038@@ -757,9 +761,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
80039 if (err)
80040 goto out;
80041 }
80042+
80043 err = vma_replace_policy(vma, new_pol);
80044 if (err)
80045 goto out;
80046+
80047+#ifdef CONFIG_PAX_SEGMEXEC
80048+ vma_m = pax_find_mirror_vma(vma);
80049+ if (vma_m) {
80050+ err = vma_replace_policy(vma_m, new_pol);
80051+ if (err)
80052+ goto out;
80053+ }
80054+#endif
80055+
80056 }
80057
80058 out:
80059@@ -1216,6 +1231,17 @@ static long do_mbind(unsigned long start, unsigned long len,
80060
80061 if (end < start)
80062 return -EINVAL;
80063+
80064+#ifdef CONFIG_PAX_SEGMEXEC
80065+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
80066+ if (end > SEGMEXEC_TASK_SIZE)
80067+ return -EINVAL;
80068+ } else
80069+#endif
80070+
80071+ if (end > TASK_SIZE)
80072+ return -EINVAL;
80073+
80074 if (end == start)
80075 return 0;
80076
80077@@ -1445,8 +1471,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
80078 */
80079 tcred = __task_cred(task);
80080 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
80081- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
80082- !capable(CAP_SYS_NICE)) {
80083+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
80084 rcu_read_unlock();
80085 err = -EPERM;
80086 goto out_put;
80087@@ -1477,6 +1502,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
80088 goto out;
80089 }
80090
80091+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
80092+ if (mm != current->mm &&
80093+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
80094+ mmput(mm);
80095+ err = -EPERM;
80096+ goto out;
80097+ }
80098+#endif
80099+
80100 err = do_migrate_pages(mm, old, new,
80101 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
80102
80103diff --git a/mm/migrate.c b/mm/migrate.c
80104index 2fd8b4a..d70358f 100644
80105--- a/mm/migrate.c
80106+++ b/mm/migrate.c
80107@@ -1401,8 +1401,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
80108 */
80109 tcred = __task_cred(task);
80110 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
80111- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
80112- !capable(CAP_SYS_NICE)) {
80113+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
80114 rcu_read_unlock();
80115 err = -EPERM;
80116 goto out;
80117diff --git a/mm/mlock.c b/mm/mlock.c
80118index c9bd528..da8d069 100644
80119--- a/mm/mlock.c
80120+++ b/mm/mlock.c
80121@@ -13,6 +13,7 @@
80122 #include <linux/pagemap.h>
80123 #include <linux/mempolicy.h>
80124 #include <linux/syscalls.h>
80125+#include <linux/security.h>
80126 #include <linux/sched.h>
80127 #include <linux/export.h>
80128 #include <linux/rmap.h>
80129@@ -369,7 +370,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
80130 {
80131 unsigned long nstart, end, tmp;
80132 struct vm_area_struct * vma, * prev;
80133- int error;
80134+ int error = 0;
80135
80136 VM_BUG_ON(start & ~PAGE_MASK);
80137 VM_BUG_ON(len != PAGE_ALIGN(len));
80138@@ -378,6 +379,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
80139 return -EINVAL;
80140 if (end == start)
80141 return 0;
80142+ if (end > TASK_SIZE)
80143+ return -EINVAL;
80144+
80145 vma = find_vma(current->mm, start);
80146 if (!vma || vma->vm_start > start)
80147 return -ENOMEM;
80148@@ -389,6 +393,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
80149 for (nstart = start ; ; ) {
80150 vm_flags_t newflags;
80151
80152+#ifdef CONFIG_PAX_SEGMEXEC
80153+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
80154+ break;
80155+#endif
80156+
80157 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
80158
80159 newflags = vma->vm_flags | VM_LOCKED;
80160@@ -494,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
80161 lock_limit >>= PAGE_SHIFT;
80162
80163 /* check against resource limits */
80164+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
80165 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
80166 error = do_mlock(start, len, 1);
80167 up_write(&current->mm->mmap_sem);
80168@@ -528,6 +538,12 @@ static int do_mlockall(int flags)
80169 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
80170 vm_flags_t newflags;
80171
80172+#ifdef CONFIG_PAX_SEGMEXEC
80173+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
80174+ break;
80175+#endif
80176+
80177+ BUG_ON(vma->vm_end > TASK_SIZE);
80178 newflags = vma->vm_flags | VM_LOCKED;
80179 if (!(flags & MCL_CURRENT))
80180 newflags &= ~VM_LOCKED;
80181@@ -560,6 +576,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
80182 lock_limit >>= PAGE_SHIFT;
80183
80184 ret = -ENOMEM;
80185+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
80186 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
80187 capable(CAP_IPC_LOCK))
80188 ret = do_mlockall(flags);
80189diff --git a/mm/mmap.c b/mm/mmap.c
80190index 8832b87..7d36e4f 100644
80191--- a/mm/mmap.c
80192+++ b/mm/mmap.c
80193@@ -32,6 +32,7 @@
80194 #include <linux/khugepaged.h>
80195 #include <linux/uprobes.h>
80196 #include <linux/rbtree_augmented.h>
80197+#include <linux/random.h>
80198
80199 #include <asm/uaccess.h>
80200 #include <asm/cacheflush.h>
80201@@ -48,6 +49,16 @@
80202 #define arch_rebalance_pgtables(addr, len) (addr)
80203 #endif
80204
80205+static inline void verify_mm_writelocked(struct mm_struct *mm)
80206+{
80207+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
80208+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
80209+ up_read(&mm->mmap_sem);
80210+ BUG();
80211+ }
80212+#endif
80213+}
80214+
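verify_mm_writelocked(), now compiled in for CONFIG_PAX as well as CONFIG_DEBUG_VM and promoted from WARN_ON to BUG, relies on a simple rwsem property: a read-trylock can only succeed while no writer holds the semaphore, so success proves the caller broke its locking contract. The same check against a POSIX rwlock, as a sketch:

#include <assert.h>
#include <pthread.h>

static void verify_writelocked(pthread_rwlock_t *l)
{
	if (pthread_rwlock_tryrdlock(l) == 0) {	/* succeeds => no writer */
		pthread_rwlock_unlock(l);
		assert(!"caller must hold the lock for writing");
	}
}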
80215 static void unmap_region(struct mm_struct *mm,
80216 struct vm_area_struct *vma, struct vm_area_struct *prev,
80217 unsigned long start, unsigned long end);
80218@@ -67,22 +78,32 @@ static void unmap_region(struct mm_struct *mm,
80219 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
80220 *
80221 */
80222-pgprot_t protection_map[16] = {
80223+pgprot_t protection_map[16] __read_only = {
80224 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
80225 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
80226 };
80227
80228-pgprot_t vm_get_page_prot(unsigned long vm_flags)
80229+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
80230 {
80231- return __pgprot(pgprot_val(protection_map[vm_flags &
80232+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
80233 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
80234 pgprot_val(arch_vm_get_page_prot(vm_flags)));
80235+
80236+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
80237+ if (!(__supported_pte_mask & _PAGE_NX) &&
80238+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
80239+ (vm_flags & (VM_READ | VM_WRITE)))
80240+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
80241+#endif
80242+
80243+ return prot;
80244 }
80245 EXPORT_SYMBOL(vm_get_page_prot);
80246
80247 int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
80248 int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
80249 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
80250+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
80251 /*
80252 * Make sure vm_committed_as in one cacheline and not cacheline shared with
80253 * other variables. It can be updated by several CPUs frequently.
80254@@ -238,6 +259,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
80255 struct vm_area_struct *next = vma->vm_next;
80256
80257 might_sleep();
80258+ BUG_ON(vma->vm_mirror);
80259 if (vma->vm_ops && vma->vm_ops->close)
80260 vma->vm_ops->close(vma);
80261 if (vma->vm_file)
80262@@ -281,6 +303,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
80263 * not page aligned -Ram Gupta
80264 */
80265 rlim = rlimit(RLIMIT_DATA);
80266+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
80267 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
80268 (mm->end_data - mm->start_data) > rlim)
80269 goto out;
80270@@ -888,6 +911,12 @@ static int
80271 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
80272 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
80273 {
80274+
80275+#ifdef CONFIG_PAX_SEGMEXEC
80276+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
80277+ return 0;
80278+#endif
80279+
80280 if (is_mergeable_vma(vma, file, vm_flags) &&
80281 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
80282 if (vma->vm_pgoff == vm_pgoff)
80283@@ -907,6 +936,12 @@ static int
80284 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
80285 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
80286 {
80287+
80288+#ifdef CONFIG_PAX_SEGMEXEC
80289+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
80290+ return 0;
80291+#endif
80292+
80293 if (is_mergeable_vma(vma, file, vm_flags) &&
80294 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
80295 pgoff_t vm_pglen;
80296@@ -949,13 +984,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
80297 struct vm_area_struct *vma_merge(struct mm_struct *mm,
80298 struct vm_area_struct *prev, unsigned long addr,
80299 unsigned long end, unsigned long vm_flags,
80300- struct anon_vma *anon_vma, struct file *file,
80301+ struct anon_vma *anon_vma, struct file *file,
80302 pgoff_t pgoff, struct mempolicy *policy)
80303 {
80304 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
80305 struct vm_area_struct *area, *next;
80306 int err;
80307
80308+#ifdef CONFIG_PAX_SEGMEXEC
80309+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
80310+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
80311+
80312+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
80313+#endif
80314+
80315 /*
80316 * We later require that vma->vm_flags == vm_flags,
80317 * so this tests vma->vm_flags & VM_SPECIAL, too.
80318@@ -971,6 +1013,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
80319 if (next && next->vm_end == end) /* cases 6, 7, 8 */
80320 next = next->vm_next;
80321
80322+#ifdef CONFIG_PAX_SEGMEXEC
80323+ if (prev)
80324+ prev_m = pax_find_mirror_vma(prev);
80325+ if (area)
80326+ area_m = pax_find_mirror_vma(area);
80327+ if (next)
80328+ next_m = pax_find_mirror_vma(next);
80329+#endif
80330+
80331 /*
80332 * Can it merge with the predecessor?
80333 */
80334@@ -990,9 +1041,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
80335 /* cases 1, 6 */
80336 err = vma_adjust(prev, prev->vm_start,
80337 next->vm_end, prev->vm_pgoff, NULL);
80338- } else /* cases 2, 5, 7 */
80339+
80340+#ifdef CONFIG_PAX_SEGMEXEC
80341+ if (!err && prev_m)
80342+ err = vma_adjust(prev_m, prev_m->vm_start,
80343+ next_m->vm_end, prev_m->vm_pgoff, NULL);
80344+#endif
80345+
80346+ } else { /* cases 2, 5, 7 */
80347 err = vma_adjust(prev, prev->vm_start,
80348 end, prev->vm_pgoff, NULL);
80349+
80350+#ifdef CONFIG_PAX_SEGMEXEC
80351+ if (!err && prev_m)
80352+ err = vma_adjust(prev_m, prev_m->vm_start,
80353+ end_m, prev_m->vm_pgoff, NULL);
80354+#endif
80355+
80356+ }
80357 if (err)
80358 return NULL;
80359 khugepaged_enter_vma_merge(prev);
80360@@ -1006,12 +1072,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
80361 mpol_equal(policy, vma_policy(next)) &&
80362 can_vma_merge_before(next, vm_flags,
80363 anon_vma, file, pgoff+pglen)) {
80364- if (prev && addr < prev->vm_end) /* case 4 */
80365+ if (prev && addr < prev->vm_end) { /* case 4 */
80366 err = vma_adjust(prev, prev->vm_start,
80367 addr, prev->vm_pgoff, NULL);
80368- else /* cases 3, 8 */
80369+
80370+#ifdef CONFIG_PAX_SEGMEXEC
80371+ if (!err && prev_m)
80372+ err = vma_adjust(prev_m, prev_m->vm_start,
80373+ addr_m, prev_m->vm_pgoff, NULL);
80374+#endif
80375+
80376+ } else { /* cases 3, 8 */
80377 err = vma_adjust(area, addr, next->vm_end,
80378 next->vm_pgoff - pglen, NULL);
80379+
80380+#ifdef CONFIG_PAX_SEGMEXEC
80381+ if (!err && area_m)
80382+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
80383+ next_m->vm_pgoff - pglen, NULL);
80384+#endif
80385+
80386+ }
80387 if (err)
80388 return NULL;
80389 khugepaged_enter_vma_merge(area);
80390@@ -1120,16 +1201,13 @@ none:
80391 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
80392 struct file *file, long pages)
80393 {
80394- const unsigned long stack_flags
80395- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
80396-
80397 mm->total_vm += pages;
80398
80399 if (file) {
80400 mm->shared_vm += pages;
80401 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
80402 mm->exec_vm += pages;
80403- } else if (flags & stack_flags)
80404+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
80405 mm->stack_vm += pages;
80406 }
80407 #endif /* CONFIG_PROC_FS */
80408@@ -1165,7 +1243,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
80409 * (the exception is when the underlying filesystem is noexec
80410 * mounted, in which case we don't add PROT_EXEC.)
80411 */
80412- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
80413+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
80414 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
80415 prot |= PROT_EXEC;
80416
80417@@ -1191,7 +1269,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
80418 /* Obtain the address to map to. We verify (or select) it and ensure
80419 * that it represents a valid section of the address space.
80420 */
80421- addr = get_unmapped_area(file, addr, len, pgoff, flags);
80422+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
80423 if (addr & ~PAGE_MASK)
80424 return addr;
80425
80426@@ -1202,6 +1280,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
80427 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
80428 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
80429
80430+#ifdef CONFIG_PAX_MPROTECT
80431+ if (mm->pax_flags & MF_PAX_MPROTECT) {
80432+#ifndef CONFIG_PAX_MPROTECT_COMPAT
80433+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
80434+ gr_log_rwxmmap(file);
80435+
80436+#ifdef CONFIG_PAX_EMUPLT
80437+ vm_flags &= ~VM_EXEC;
80438+#else
80439+ return -EPERM;
80440+#endif
80441+
80442+ }
80443+
80444+ if (!(vm_flags & VM_EXEC))
80445+ vm_flags &= ~VM_MAYEXEC;
80446+#else
80447+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
80448+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
80449+#endif
80450+ else
80451+ vm_flags &= ~VM_MAYWRITE;
80452+ }
80453+#endif
80454+
80455+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
80456+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
80457+ vm_flags &= ~VM_PAGEEXEC;
80458+#endif
80459+
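The MPROTECT block above uses a slightly tricky layout: the trailing else sits after the #else/#endif pair, so it binds to a different if depending on CONFIG_PAX_MPROTECT_COMPAT. The two preprocessed forms, spelled out as standalone helpers (the VM_* bit values below are illustrative placeholders, not taken from the kernel headers):

#define VM_WRITE	0x2UL	/* illustrative bit values */
#define VM_EXEC		0x4UL
#define VM_MAYWRITE	0x20UL
#define VM_MAYEXEC	0x40UL

static unsigned long adjust_noncompat(unsigned long vm_flags)
{
	if (!(vm_flags & VM_EXEC))
		vm_flags &= ~VM_MAYEXEC;	/* non-exec loses MAYEXEC */
	else
		vm_flags &= ~VM_MAYWRITE;	/* exec loses MAYWRITE */
	return vm_flags;
}

static unsigned long adjust_compat(unsigned long vm_flags)
{
	if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
		vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
	else
		vm_flags &= ~VM_MAYWRITE;
	return vm_flags;
}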
80460 if (flags & MAP_LOCKED)
80461 if (!can_do_mlock())
80462 return -EPERM;
80463@@ -1213,6 +1321,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
80464 locked += mm->locked_vm;
80465 lock_limit = rlimit(RLIMIT_MEMLOCK);
80466 lock_limit >>= PAGE_SHIFT;
80467+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
80468 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
80469 return -EAGAIN;
80470 }
80471@@ -1279,6 +1388,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
80472 }
80473 }
80474
80475+ if (!gr_acl_handle_mmap(file, prot))
80476+ return -EACCES;
80477+
80478 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
80479 }
80480
80481@@ -1356,7 +1468,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
80482 vm_flags_t vm_flags = vma->vm_flags;
80483
80484 /* If it was private or non-writable, the write bit is already clear */
80485- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
80486+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
80487 return 0;
80488
80489 /* The backer wishes to know when pages are first written to? */
80490@@ -1405,13 +1517,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
80491 unsigned long charged = 0;
80492 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
80493
80494+#ifdef CONFIG_PAX_SEGMEXEC
80495+ struct vm_area_struct *vma_m = NULL;
80496+#endif
80497+
80498+ /*
80499+ * mm->mmap_sem is required to protect against another thread
80500+ * changing the mappings in case we sleep.
80501+ */
80502+ verify_mm_writelocked(mm);
80503+
80504 /* Clear old maps */
80505 error = -ENOMEM;
80506-munmap_back:
80507 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
80508 if (do_munmap(mm, addr, len))
80509 return -ENOMEM;
80510- goto munmap_back;
80511+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
80512 }
80513
80514 /* Check against address space limit. */
80515@@ -1460,6 +1581,16 @@ munmap_back:
80516 goto unacct_error;
80517 }
80518
80519+#ifdef CONFIG_PAX_SEGMEXEC
80520+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
80521+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
80522+ if (!vma_m) {
80523+ error = -ENOMEM;
80524+ goto free_vma;
80525+ }
80526+ }
80527+#endif
80528+
80529 vma->vm_mm = mm;
80530 vma->vm_start = addr;
80531 vma->vm_end = addr + len;
80532@@ -1484,6 +1615,13 @@ munmap_back:
80533 if (error)
80534 goto unmap_and_free_vma;
80535
80536+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
80537+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
80538+ vma->vm_flags |= VM_PAGEEXEC;
80539+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
80540+ }
80541+#endif
80542+
80543 /* Can addr have changed??
80544 *
80545 * Answer: Yes, several device drivers can do it in their
80546@@ -1522,6 +1660,11 @@ munmap_back:
80547 vma_link(mm, vma, prev, rb_link, rb_parent);
80548 file = vma->vm_file;
80549
80550+#ifdef CONFIG_PAX_SEGMEXEC
80551+ if (vma_m)
80552+ BUG_ON(pax_mirror_vma(vma_m, vma));
80553+#endif
80554+
80555 /* Once vma denies write, undo our temporary denial count */
80556 if (correct_wcount)
80557 atomic_inc(&inode->i_writecount);
80558@@ -1529,6 +1672,7 @@ out:
80559 perf_event_mmap(vma);
80560
80561 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
80562+ track_exec_limit(mm, addr, addr + len, vm_flags);
80563 if (vm_flags & VM_LOCKED) {
80564 if (!mlock_vma_pages_range(vma, addr, addr + len))
80565 mm->locked_vm += (len >> PAGE_SHIFT);
80566@@ -1550,6 +1694,12 @@ unmap_and_free_vma:
80567 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
80568 charged = 0;
80569 free_vma:
80570+
80571+#ifdef CONFIG_PAX_SEGMEXEC
80572+ if (vma_m)
80573+ kmem_cache_free(vm_area_cachep, vma_m);
80574+#endif
80575+
80576 kmem_cache_free(vm_area_cachep, vma);
80577 unacct_error:
80578 if (charged)
80579@@ -1557,6 +1707,62 @@ unacct_error:
80580 return error;
80581 }
80582
80583+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
80584+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
80585+{
80586+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
80587+ return (random32() & 0xFF) << PAGE_SHIFT;
80588+
80589+ return 0;
80590+}
80591+#endif
80592+
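Under RANDMMAP, anonymous MAP_STACK requests get their search start nudged by a random 0-255 page offset, i.e. up to 0xff000 bytes of jitter in PAGE_SIZE steps. A userspace sketch, assuming 4 KB pages:

#include <stdlib.h>

#define PAGE_SHIFT 12	/* assumption: 4 KB pages */

static unsigned long threadstack_offset(void)
{
	/* 0..255 pages, matching (random32() & 0xFF) << PAGE_SHIFT */
	return ((unsigned long)rand() & 0xFF) << PAGE_SHIFT;
}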
80593+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
80594+{
80595+ if (!vma) {
80596+#ifdef CONFIG_STACK_GROWSUP
80597+ if (addr > sysctl_heap_stack_gap)
80598+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
80599+ else
80600+ vma = find_vma(current->mm, 0);
80601+ if (vma && (vma->vm_flags & VM_GROWSUP))
80602+ return false;
80603+#endif
80604+ return true;
80605+ }
80606+
80607+ if (addr + len > vma->vm_start)
80608+ return false;
80609+
80610+ if (vma->vm_flags & VM_GROWSDOWN)
80611+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
80612+#ifdef CONFIG_STACK_GROWSUP
80613+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
80614+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
80615+#endif
80616+ else if (offset)
80617+ return offset <= vma->vm_start - addr - len;
80618+
80619+ return true;
80620+}
80621+
80622+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
80623+{
80624+ if (vma->vm_start < len)
80625+ return -ENOMEM;
80626+
80627+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
80628+ if (offset <= vma->vm_start - len)
80629+ return vma->vm_start - len - offset;
80630+ else
80631+ return -ENOMEM;
80632+ }
80633+
80634+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
80635+ return vma->vm_start - len - sysctl_heap_stack_gap;
80636+ return -ENOMEM;
80637+}
80638+
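check_heap_stack_gap() enforces the new sysctl_heap_stack_gap (64 KB by default) between ordinary mappings and any VM_GROWSDOWN neighbour above them; skip_heap_stack_gap() then computes how far a top-down search has to back off. A worked example of the downward-growing case with made-up addresses:

/* A 64 KB gap and a stack vma starting at 0x7fff0000: a request for
 * [0x7ffe0000, 0x7ffef000) leaves only 0x1000 bytes below the stack,
 * so it must be refused; starting 64 KB lower it fits. */
#include <stdbool.h>
#include <stdio.h>

#define GAP (64UL * 1024)

static bool fits_below_stack(unsigned long addr, unsigned long len,
			     unsigned long stack_start)
{
	if (addr + len > stack_start)
		return false;
	return GAP <= stack_start - addr - len;
}

int main(void)
{
	printf("%d\n", fits_below_stack(0x7ffe0000UL, 0xf000UL, 0x7fff0000UL)); /* 0 */
	printf("%d\n", fits_below_stack(0x7ffd0000UL, 0xf000UL, 0x7fff0000UL)); /* 1 */
	return 0;
}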
80639 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
80640 {
80641 /*
80642@@ -1776,6 +1982,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
80643 struct mm_struct *mm = current->mm;
80644 struct vm_area_struct *vma;
80645 struct vm_unmapped_area_info info;
80646+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
80647
80648 if (len > TASK_SIZE)
80649 return -ENOMEM;
80650@@ -1783,17 +1990,26 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
80651 if (flags & MAP_FIXED)
80652 return addr;
80653
80654+#ifdef CONFIG_PAX_RANDMMAP
80655+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
80656+#endif
80657+
80658 if (addr) {
80659 addr = PAGE_ALIGN(addr);
80660 vma = find_vma(mm, addr);
80661- if (TASK_SIZE - len >= addr &&
80662- (!vma || addr + len <= vma->vm_start))
80663+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
80664 return addr;
80665 }
80666
80667 info.flags = 0;
80668 info.length = len;
80669 info.low_limit = TASK_UNMAPPED_BASE;
80670+
80671+#ifdef CONFIG_PAX_RANDMMAP
80672+ if (mm->pax_flags & MF_PAX_RANDMMAP)
80673+ info.low_limit += mm->delta_mmap;
80674+#endif
80675+
80676 info.high_limit = TASK_SIZE;
80677 info.align_mask = 0;
80678 return vm_unmapped_area(&info);
80679@@ -1802,10 +2018,16 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
80680
80681 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
80682 {
80683+
80684+#ifdef CONFIG_PAX_SEGMEXEC
80685+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
80686+ return;
80687+#endif
80688+
80689 /*
80690 * Is this a new hole at the lowest possible address?
80691 */
80692- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
80693+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
80694 mm->free_area_cache = addr;
80695 }
80696
80697@@ -1823,6 +2045,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
80698 struct mm_struct *mm = current->mm;
80699 unsigned long addr = addr0;
80700 struct vm_unmapped_area_info info;
80701+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
80702
80703 /* requested length too big for entire address space */
80704 if (len > TASK_SIZE)
80705@@ -1831,12 +2054,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
80706 if (flags & MAP_FIXED)
80707 return addr;
80708
80709+#ifdef CONFIG_PAX_RANDMMAP
80710+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
80711+#endif
80712+
80713 /* requesting a specific address */
80714 if (addr) {
80715 addr = PAGE_ALIGN(addr);
80716 vma = find_vma(mm, addr);
80717- if (TASK_SIZE - len >= addr &&
80718- (!vma || addr + len <= vma->vm_start))
80719+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
80720 return addr;
80721 }
80722
80723@@ -1857,6 +2083,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
80724 VM_BUG_ON(addr != -ENOMEM);
80725 info.flags = 0;
80726 info.low_limit = TASK_UNMAPPED_BASE;
80727+
80728+#ifdef CONFIG_PAX_RANDMMAP
80729+ if (mm->pax_flags & MF_PAX_RANDMMAP)
80730+ info.low_limit += mm->delta_mmap;
80731+#endif
80732+
80733 info.high_limit = TASK_SIZE;
80734 addr = vm_unmapped_area(&info);
80735 }
80736@@ -1867,6 +2099,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
80737
80738 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
80739 {
80740+
80741+#ifdef CONFIG_PAX_SEGMEXEC
80742+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
80743+ return;
80744+#endif
80745+
80746 /*
80747 * Is this a new hole at the highest possible address?
80748 */
80749@@ -1874,8 +2112,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
80750 mm->free_area_cache = addr;
80751
80752 /* don't allow allocations above current base */
80753- if (mm->free_area_cache > mm->mmap_base)
80754+ if (mm->free_area_cache > mm->mmap_base) {
80755 mm->free_area_cache = mm->mmap_base;
80756+ mm->cached_hole_size = ~0UL;
80757+ }
80758 }
80759
80760 unsigned long
80761@@ -1974,6 +2214,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
80762 return vma;
80763 }
80764
80765+#ifdef CONFIG_PAX_SEGMEXEC
80766+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
80767+{
80768+ struct vm_area_struct *vma_m;
80769+
80770+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
80771+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
80772+ BUG_ON(vma->vm_mirror);
80773+ return NULL;
80774+ }
80775+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
80776+ vma_m = vma->vm_mirror;
80777+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
80778+ BUG_ON(vma->vm_file != vma_m->vm_file);
80779+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
80780+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
80781+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
80782+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
80783+ return vma_m;
80784+}
80785+#endif
80786+
80787 /*
80788 * Verify that the stack growth is acceptable and
80789 * update accounting. This is shared with both the
80790@@ -1990,6 +2252,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
80791 return -ENOMEM;
80792
80793 /* Stack limit test */
80794+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
80795 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
80796 return -ENOMEM;
80797
80798@@ -2000,6 +2263,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
80799 locked = mm->locked_vm + grow;
80800 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
80801 limit >>= PAGE_SHIFT;
80802+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
80803 if (locked > limit && !capable(CAP_IPC_LOCK))
80804 return -ENOMEM;
80805 }
80806@@ -2029,37 +2293,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
80807 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
80808 * vma is the last one with address > vma->vm_end. Have to extend vma.
80809 */
80810+#ifndef CONFIG_IA64
80811+static
80812+#endif
80813 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
80814 {
80815 int error;
80816+ bool locknext;
80817
80818 if (!(vma->vm_flags & VM_GROWSUP))
80819 return -EFAULT;
80820
80821+ /* Also guard against wrapping around to address 0. */
80822+ if (address < PAGE_ALIGN(address+1))
80823+ address = PAGE_ALIGN(address+1);
80824+ else
80825+ return -ENOMEM;
80826+
80827 /*
80828 * We must make sure the anon_vma is allocated
80829 * so that the anon_vma locking is not a noop.
80830 */
80831 if (unlikely(anon_vma_prepare(vma)))
80832 return -ENOMEM;
80833+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
80834+ if (locknext && anon_vma_prepare(vma->vm_next))
80835+ return -ENOMEM;
80836 vma_lock_anon_vma(vma);
80837+ if (locknext)
80838+ vma_lock_anon_vma(vma->vm_next);
80839
80840 /*
80841 * vma->vm_start/vm_end cannot change under us because the caller
80842 * is required to hold the mmap_sem in read mode. We need the
80843- * anon_vma lock to serialize against concurrent expand_stacks.
80844- * Also guard against wrapping around to address 0.
80845+ * anon_vma locks to serialize against concurrent expand_stacks
80846+ * and expand_upwards.
80847 */
80848- if (address < PAGE_ALIGN(address+4))
80849- address = PAGE_ALIGN(address+4);
80850- else {
80851- vma_unlock_anon_vma(vma);
80852- return -ENOMEM;
80853- }
80854 error = 0;
80855
80856 /* Somebody else might have raced and expanded it already */
80857- if (address > vma->vm_end) {
80858+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
80859+ error = -ENOMEM;
80860+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
80861 unsigned long size, grow;
80862
80863 size = address - vma->vm_start;
80864@@ -2094,6 +2369,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
80865 }
80866 }
80867 }
80868+ if (locknext)
80869+ vma_unlock_anon_vma(vma->vm_next);
80870 vma_unlock_anon_vma(vma);
80871 khugepaged_enter_vma_merge(vma);
80872 validate_mm(vma->vm_mm);
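Two things change in expand_upwards(): the wrap-around guard moves before the anon_vma locking (and aligns to address+1 rather than address+4), and if the next vma is a downward-growing stack its anon_vma is locked too, so concurrent expansion from both sides cannot squeeze the heap/stack gap. The guard in isolation, on a 32-bit address space:

#include <stdint.h>
#include <stdio.h>

#define PAGE_ALIGN(x) (((x) + 0xFFFu) & ~0xFFFu)	/* 4 KB pages, sketch */

int main(void)
{
	uint32_t address = 0xFFFFFFFFu;
	uint32_t aligned = PAGE_ALIGN(address + 1u);	/* wraps to 0 */
	puts(address < aligned ? "ok" : "-ENOMEM");	/* prints -ENOMEM */
	return 0;
}

PAGE_ALIGN(address + 1) only wraps to a smaller value when address sits in the last page of the address space, which is exactly the case the guard rejects.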
80873@@ -2108,6 +2385,8 @@ int expand_downwards(struct vm_area_struct *vma,
80874 unsigned long address)
80875 {
80876 int error;
80877+ bool lockprev = false;
80878+ struct vm_area_struct *prev;
80879
80880 /*
80881 * We must make sure the anon_vma is allocated
80882@@ -2121,6 +2400,15 @@ int expand_downwards(struct vm_area_struct *vma,
80883 if (error)
80884 return error;
80885
80886+ prev = vma->vm_prev;
80887+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
80888+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
80889+#endif
80890+ if (lockprev && anon_vma_prepare(prev))
80891+ return -ENOMEM;
80892+ if (lockprev)
80893+ vma_lock_anon_vma(prev);
80894+
80895 vma_lock_anon_vma(vma);
80896
80897 /*
80898@@ -2130,9 +2418,17 @@ int expand_downwards(struct vm_area_struct *vma,
80899 */
80900
80901 /* Somebody else might have raced and expanded it already */
80902- if (address < vma->vm_start) {
80903+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
80904+ error = -ENOMEM;
80905+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
80906 unsigned long size, grow;
80907
80908+#ifdef CONFIG_PAX_SEGMEXEC
80909+ struct vm_area_struct *vma_m;
80910+
80911+ vma_m = pax_find_mirror_vma(vma);
80912+#endif
80913+
80914 size = vma->vm_end - address;
80915 grow = (vma->vm_start - address) >> PAGE_SHIFT;
80916
80917@@ -2157,6 +2453,18 @@ int expand_downwards(struct vm_area_struct *vma,
80918 vma->vm_pgoff -= grow;
80919 anon_vma_interval_tree_post_update_vma(vma);
80920 vma_gap_update(vma);
80921+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
80922+
80923+#ifdef CONFIG_PAX_SEGMEXEC
80924+ if (vma_m) {
80925+ anon_vma_interval_tree_pre_update_vma(vma_m);
80926+ vma_m->vm_start -= grow << PAGE_SHIFT;
80927+ vma_m->vm_pgoff -= grow;
80928+ anon_vma_interval_tree_post_update_vma(vma_m);
80929+ vma_gap_update(vma_m);
80930+ }
80931+#endif
80932+
80933 spin_unlock(&vma->vm_mm->page_table_lock);
80934
80935 perf_event_mmap(vma);
80936@@ -2263,6 +2571,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
80937 do {
80938 long nrpages = vma_pages(vma);
80939
80940+#ifdef CONFIG_PAX_SEGMEXEC
80941+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
80942+ vma = remove_vma(vma);
80943+ continue;
80944+ }
80945+#endif
80946+
80947 if (vma->vm_flags & VM_ACCOUNT)
80948 nr_accounted += nrpages;
80949 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
80950@@ -2308,6 +2623,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
80951 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
80952 vma->vm_prev = NULL;
80953 do {
80954+
80955+#ifdef CONFIG_PAX_SEGMEXEC
80956+ if (vma->vm_mirror) {
80957+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
80958+ vma->vm_mirror->vm_mirror = NULL;
80959+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
80960+ vma->vm_mirror = NULL;
80961+ }
80962+#endif
80963+
80964 vma_rb_erase(vma, &mm->mm_rb);
80965 mm->map_count--;
80966 tail_vma = vma;
80967@@ -2339,14 +2664,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
80968 struct vm_area_struct *new;
80969 int err = -ENOMEM;
80970
80971+#ifdef CONFIG_PAX_SEGMEXEC
80972+ struct vm_area_struct *vma_m, *new_m = NULL;
80973+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
80974+#endif
80975+
80976 if (is_vm_hugetlb_page(vma) && (addr &
80977 ~(huge_page_mask(hstate_vma(vma)))))
80978 return -EINVAL;
80979
80980+#ifdef CONFIG_PAX_SEGMEXEC
80981+ vma_m = pax_find_mirror_vma(vma);
80982+#endif
80983+
80984 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
80985 if (!new)
80986 goto out_err;
80987
80988+#ifdef CONFIG_PAX_SEGMEXEC
80989+ if (vma_m) {
80990+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
80991+ if (!new_m) {
80992+ kmem_cache_free(vm_area_cachep, new);
80993+ goto out_err;
80994+ }
80995+ }
80996+#endif
80997+
80998 /* most fields are the same, copy all, and then fixup */
80999 *new = *vma;
81000
81001@@ -2359,6 +2703,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
81002 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
81003 }
81004
81005+#ifdef CONFIG_PAX_SEGMEXEC
81006+ if (vma_m) {
81007+ *new_m = *vma_m;
81008+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
81009+ new_m->vm_mirror = new;
81010+ new->vm_mirror = new_m;
81011+
81012+ if (new_below)
81013+ new_m->vm_end = addr_m;
81014+ else {
81015+ new_m->vm_start = addr_m;
81016+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
81017+ }
81018+ }
81019+#endif
81020+
81021 pol = mpol_dup(vma_policy(vma));
81022 if (IS_ERR(pol)) {
81023 err = PTR_ERR(pol);
81024@@ -2381,6 +2741,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
81025 else
81026 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
81027
81028+#ifdef CONFIG_PAX_SEGMEXEC
81029+ if (!err && vma_m) {
81030+ if (anon_vma_clone(new_m, vma_m))
81031+ goto out_free_mpol;
81032+
81033+ mpol_get(pol);
81034+ vma_set_policy(new_m, pol);
81035+
81036+ if (new_m->vm_file)
81037+ get_file(new_m->vm_file);
81038+
81039+ if (new_m->vm_ops && new_m->vm_ops->open)
81040+ new_m->vm_ops->open(new_m);
81041+
81042+ if (new_below)
81043+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
81044+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
81045+ else
81046+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
81047+
81048+ if (err) {
81049+ if (new_m->vm_ops && new_m->vm_ops->close)
81050+ new_m->vm_ops->close(new_m);
81051+ if (new_m->vm_file)
81052+ fput(new_m->vm_file);
81053+ mpol_put(pol);
81054+ }
81055+ }
81056+#endif
81057+
81058 /* Success. */
81059 if (!err)
81060 return 0;
81061@@ -2390,10 +2780,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
81062 new->vm_ops->close(new);
81063 if (new->vm_file)
81064 fput(new->vm_file);
81065- unlink_anon_vmas(new);
81066 out_free_mpol:
81067 mpol_put(pol);
81068 out_free_vma:
81069+
81070+#ifdef CONFIG_PAX_SEGMEXEC
81071+ if (new_m) {
81072+ unlink_anon_vmas(new_m);
81073+ kmem_cache_free(vm_area_cachep, new_m);
81074+ }
81075+#endif
81076+
81077+ unlink_anon_vmas(new);
81078 kmem_cache_free(vm_area_cachep, new);
81079 out_err:
81080 return err;
81081@@ -2406,6 +2804,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
81082 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
81083 unsigned long addr, int new_below)
81084 {
81085+
81086+#ifdef CONFIG_PAX_SEGMEXEC
81087+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
81088+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
81089+ if (mm->map_count >= sysctl_max_map_count-1)
81090+ return -ENOMEM;
81091+ } else
81092+#endif
81093+
81094 if (mm->map_count >= sysctl_max_map_count)
81095 return -ENOMEM;
81096
81097@@ -2417,11 +2824,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
81098 * work. This now handles partial unmappings.
81099 * Jeremy Fitzhardinge <jeremy@goop.org>
81100 */
81101+#ifdef CONFIG_PAX_SEGMEXEC
81102 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
81103 {
81104+ int ret = __do_munmap(mm, start, len);
81105+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
81106+ return ret;
81107+
81108+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
81109+}
81110+
81111+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
81112+#else
81113+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
81114+#endif
81115+{
81116 unsigned long end;
81117 struct vm_area_struct *vma, *prev, *last;
81118
81119+ /*
81120+ * mm->mmap_sem is required to protect against another thread
81121+ * changing the mappings in case we sleep.
81122+ */
81123+ verify_mm_writelocked(mm);
81124+
81125 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
81126 return -EINVAL;
81127
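With SEGMEXEC active, do_munmap() becomes a thin wrapper: after __do_munmap() clears the requested range it repeats the call at start + SEGMEXEC_TASK_SIZE, so any executable mirrors disappear together with their lower halves. The shape of the wrapper, with unmap_range() as a hypothetical stand-in for __do_munmap():

#define SEGMEXEC_TASK_SIZE (1UL << 30)	/* illustrative, as earlier */

/* unmap_range() is a hypothetical stand-in for __do_munmap() */
static int unmap_range(unsigned long start, unsigned long len)
{
	(void)start; (void)len;
	return 0;
}

static int unmap_both_halves(unsigned long start, unsigned long len)
{
	int ret = unmap_range(start, len);			/* lower half */
	if (ret)
		return ret;
	return unmap_range(start + SEGMEXEC_TASK_SIZE, len);	/* mirror */
}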
81128@@ -2496,6 +2922,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
81129 /* Fix up all other VM information */
81130 remove_vma_list(mm, vma);
81131
81132+ track_exec_limit(mm, start, end, 0UL);
81133+
81134 return 0;
81135 }
81136
81137@@ -2504,6 +2932,13 @@ int vm_munmap(unsigned long start, size_t len)
81138 int ret;
81139 struct mm_struct *mm = current->mm;
81140
81141+
81142+#ifdef CONFIG_PAX_SEGMEXEC
81143+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
81144+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
81145+ return -EINVAL;
81146+#endif
81147+
81148 down_write(&mm->mmap_sem);
81149 ret = do_munmap(mm, start, len);
81150 up_write(&mm->mmap_sem);
81151@@ -2517,16 +2952,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
81152 return vm_munmap(addr, len);
81153 }
81154
81155-static inline void verify_mm_writelocked(struct mm_struct *mm)
81156-{
81157-#ifdef CONFIG_DEBUG_VM
81158- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
81159- WARN_ON(1);
81160- up_read(&mm->mmap_sem);
81161- }
81162-#endif
81163-}
81164-
81165 /*
81166 * this is really a simplified "do_mmap". it only handles
81167 * anonymous maps. eventually we may be able to do some
81168@@ -2540,6 +2965,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
81169 struct rb_node ** rb_link, * rb_parent;
81170 pgoff_t pgoff = addr >> PAGE_SHIFT;
81171 int error;
81172+ unsigned long charged;
81173
81174 len = PAGE_ALIGN(len);
81175 if (!len)
81176@@ -2547,16 +2973,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
81177
81178 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
81179
81180+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
81181+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
81182+ flags &= ~VM_EXEC;
81183+
81184+#ifdef CONFIG_PAX_MPROTECT
81185+ if (mm->pax_flags & MF_PAX_MPROTECT)
81186+ flags &= ~VM_MAYEXEC;
81187+#endif
81188+
81189+ }
81190+#endif
81191+
81192 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
81193 if (error & ~PAGE_MASK)
81194 return error;
81195
81196+ charged = len >> PAGE_SHIFT;
81197+
81198 /*
81199 * mlock MCL_FUTURE?
81200 */
81201 if (mm->def_flags & VM_LOCKED) {
81202 unsigned long locked, lock_limit;
81203- locked = len >> PAGE_SHIFT;
81204+ locked = charged;
81205 locked += mm->locked_vm;
81206 lock_limit = rlimit(RLIMIT_MEMLOCK);
81207 lock_limit >>= PAGE_SHIFT;
81208@@ -2573,21 +3013,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
81209 /*
81210 * Clear old maps. this also does some error checking for us
81211 */
81212- munmap_back:
81213 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
81214 if (do_munmap(mm, addr, len))
81215 return -ENOMEM;
81216- goto munmap_back;
81217+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
81218 }
81219
81220 /* Check against address space limits *after* clearing old maps... */
81221- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
81222+ if (!may_expand_vm(mm, charged))
81223 return -ENOMEM;
81224
81225 if (mm->map_count > sysctl_max_map_count)
81226 return -ENOMEM;
81227
81228- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
81229+ if (security_vm_enough_memory_mm(mm, charged))
81230 return -ENOMEM;
81231
81232 /* Can we just expand an old private anonymous mapping? */
81233@@ -2601,7 +3040,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
81234 */
81235 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
81236 if (!vma) {
81237- vm_unacct_memory(len >> PAGE_SHIFT);
81238+ vm_unacct_memory(charged);
81239 return -ENOMEM;
81240 }
81241
81242@@ -2615,11 +3054,12 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
81243 vma_link(mm, vma, prev, rb_link, rb_parent);
81244 out:
81245 perf_event_mmap(vma);
81246- mm->total_vm += len >> PAGE_SHIFT;
81247+ mm->total_vm += charged;
81248 if (flags & VM_LOCKED) {
81249 if (!mlock_vma_pages_range(vma, addr, addr + len))
81250- mm->locked_vm += (len >> PAGE_SHIFT);
81251+ mm->locked_vm += charged;
81252 }
81253+ track_exec_limit(mm, addr, addr + len, flags);
81254 return addr;
81255 }
81256
81257@@ -2677,6 +3117,7 @@ void exit_mmap(struct mm_struct *mm)
81258 while (vma) {
81259 if (vma->vm_flags & VM_ACCOUNT)
81260 nr_accounted += vma_pages(vma);
81261+ vma->vm_mirror = NULL;
81262 vma = remove_vma(vma);
81263 }
81264 vm_unacct_memory(nr_accounted);
81265@@ -2693,6 +3134,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
81266 struct vm_area_struct *prev;
81267 struct rb_node **rb_link, *rb_parent;
81268
81269+#ifdef CONFIG_PAX_SEGMEXEC
81270+ struct vm_area_struct *vma_m = NULL;
81271+#endif
81272+
81273+ if (security_mmap_addr(vma->vm_start))
81274+ return -EPERM;
81275+
81276 /*
81277 * The vm_pgoff of a purely anonymous vma should be irrelevant
81278 * until its first write fault, when page's anon_vma and index
81279@@ -2716,7 +3164,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
81280 security_vm_enough_memory_mm(mm, vma_pages(vma)))
81281 return -ENOMEM;
81282
81283+#ifdef CONFIG_PAX_SEGMEXEC
81284+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
81285+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
81286+ if (!vma_m)
81287+ return -ENOMEM;
81288+ }
81289+#endif
81290+
81291 vma_link(mm, vma, prev, rb_link, rb_parent);
81292+
81293+#ifdef CONFIG_PAX_SEGMEXEC
81294+ if (vma_m)
81295+ BUG_ON(pax_mirror_vma(vma_m, vma));
81296+#endif
81297+
81298 return 0;
81299 }
81300
81301@@ -2736,6 +3198,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
81302 struct mempolicy *pol;
81303 bool faulted_in_anon_vma = true;
81304
81305+ BUG_ON(vma->vm_mirror);
81306+
81307 /*
81308 * If anonymous vma has not yet been faulted, update new pgoff
81309 * to match new location, to increase its chance of merging.
81310@@ -2802,6 +3266,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
81311 return NULL;
81312 }
81313
81314+#ifdef CONFIG_PAX_SEGMEXEC
81315+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
81316+{
81317+ struct vm_area_struct *prev_m;
81318+ struct rb_node **rb_link_m, *rb_parent_m;
81319+ struct mempolicy *pol_m;
81320+
81321+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
81322+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
81323+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
81324+ *vma_m = *vma;
81325+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
81326+ if (anon_vma_clone(vma_m, vma))
81327+ return -ENOMEM;
81328+ pol_m = vma_policy(vma_m);
81329+ mpol_get(pol_m);
81330+ vma_set_policy(vma_m, pol_m);
81331+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
81332+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
81333+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
81334+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
81335+ if (vma_m->vm_file)
81336+ get_file(vma_m->vm_file);
81337+ if (vma_m->vm_ops && vma_m->vm_ops->open)
81338+ vma_m->vm_ops->open(vma_m);
81339+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
81340+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
81341+ vma_m->vm_mirror = vma;
81342+ vma->vm_mirror = vma_m;
81343+ return 0;
81344+}
81345+#endif
81346+
81347 /*
81348 * Return true if the calling process may expand its vm space by the passed
81349 * number of pages
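
As a minimal userspace sketch of the mirroring arithmetic in pax_mirror_vma() above: every executable mapping gets a second vma shifted up by SEGMEXEC_TASK_SIZE with all write bits dropped. The constants and the vma model below are illustrative, not kernel code.

/*
 * Sketch of SEGMEXEC mirroring: [start, end) gets a read/exec mirror
 * shifted up by SEGMEXEC_TASK_SIZE, never writable.  Demo values only.
 */
#include <stdio.h>

#define SEGMEXEC_TASK_SIZE (1UL << 30)	/* assumed split point for the demo */
#define VM_WRITE	0x2UL
#define VM_EXEC		0x4UL
#define VM_MAYWRITE	0x10UL

struct demo_vma { unsigned long start, end, flags; };

static struct demo_vma mirror_of(const struct demo_vma *vma)
{
	struct demo_vma m = *vma;

	m.start += SEGMEXEC_TASK_SIZE;		/* shifted into the mirror segment */
	m.end   += SEGMEXEC_TASK_SIZE;
	m.flags &= ~(VM_WRITE | VM_MAYWRITE);	/* mirror is never writable */
	return m;
}

int main(void)
{
	struct demo_vma vma = { 0x08048000UL, 0x08050000UL, VM_EXEC | VM_WRITE };
	struct demo_vma m = mirror_of(&vma);

	printf("orig:   %#lx-%#lx flags=%#lx\n", vma.start, vma.end, vma.flags);
	printf("mirror: %#lx-%#lx flags=%#lx\n", m.start, m.end, m.flags);
	return 0;
}
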
81350@@ -2813,6 +3310,12 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
81351
81352 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
81353
81354+#ifdef CONFIG_PAX_RANDMMAP
81355+ if (mm->pax_flags & MF_PAX_RANDMMAP)
81356+ cur -= mm->brk_gap;
81357+#endif
81358+
81359+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
81360 if (cur + npages > lim)
81361 return 0;
81362 return 1;
81363@@ -2883,6 +3386,22 @@ int install_special_mapping(struct mm_struct *mm,
81364 vma->vm_start = addr;
81365 vma->vm_end = addr + len;
81366
81367+#ifdef CONFIG_PAX_MPROTECT
81368+ if (mm->pax_flags & MF_PAX_MPROTECT) {
81369+#ifndef CONFIG_PAX_MPROTECT_COMPAT
81370+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
81371+ return -EPERM;
81372+ if (!(vm_flags & VM_EXEC))
81373+ vm_flags &= ~VM_MAYEXEC;
81374+#else
81375+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
81376+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
81377+#endif
81378+ else
81379+ vm_flags &= ~VM_MAYWRITE;
81380+ }
81381+#endif
81382+
81383 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
81384 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
81385
81386diff --git a/mm/mprotect.c b/mm/mprotect.c
81387index 94722a4..9837984 100644
81388--- a/mm/mprotect.c
81389+++ b/mm/mprotect.c
81390@@ -23,10 +23,17 @@
81391 #include <linux/mmu_notifier.h>
81392 #include <linux/migrate.h>
81393 #include <linux/perf_event.h>
81394+
81395+#ifdef CONFIG_PAX_MPROTECT
81396+#include <linux/elf.h>
81397+#include <linux/binfmts.h>
81398+#endif
81399+
81400 #include <asm/uaccess.h>
81401 #include <asm/pgtable.h>
81402 #include <asm/cacheflush.h>
81403 #include <asm/tlbflush.h>
81404+#include <asm/mmu_context.h>
81405
81406 #ifndef pgprot_modify
81407 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
81408@@ -233,6 +240,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
81409 return pages;
81410 }
81411
81412+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
81413 +/* called while holding the mmap semaphore for writing, except for stack expansion */
81414+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
81415+{
81416+ unsigned long oldlimit, newlimit = 0UL;
81417+
81418+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
81419+ return;
81420+
81421+ spin_lock(&mm->page_table_lock);
81422+ oldlimit = mm->context.user_cs_limit;
81423+ if ((prot & VM_EXEC) && oldlimit < end)
81424+ /* USER_CS limit moved up */
81425+ newlimit = end;
81426+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
81427+ /* USER_CS limit moved down */
81428+ newlimit = start;
81429+
81430+ if (newlimit) {
81431+ mm->context.user_cs_limit = newlimit;
81432+
81433+#ifdef CONFIG_SMP
81434+ wmb();
81435+ cpus_clear(mm->context.cpu_user_cs_mask);
81436+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
81437+#endif
81438+
81439+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
81440+ }
81441+ spin_unlock(&mm->page_table_lock);
81442+ if (newlimit == end) {
81443+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
81444+
81445+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
81446+ if (is_vm_hugetlb_page(vma))
81447+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
81448+ else
81449+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
81450+ }
81451+}
81452+#endif
81453+
81454 int
81455 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
81456 unsigned long start, unsigned long end, unsigned long newflags)
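
The decision logic in track_exec_limit() above boils down to two cases: the USER_CS limit rises to the end of a range that becomes executable, and falls back to the start of a range that loses exec while straddling the old limit. A standalone model of just that decision (illustrative addresses):

/* returns the new limit, or 0 if it is unchanged */
#include <stdio.h>

static unsigned long new_cs_limit(unsigned long oldlimit, unsigned long start,
				  unsigned long end, int exec)
{
	if (exec && oldlimit < end)
		return end;			/* limit moves up */
	if (!exec && start < oldlimit && oldlimit <= end)
		return start;			/* limit moves down */
	return 0;
}

int main(void)
{
	/* mapping 0x1000-0x3000 becomes executable; old limit 0x2000 */
	printf("%#lx\n", new_cs_limit(0x2000, 0x1000, 0x3000, 1)); /* 0x3000 */
	/* same range loses exec; limit falls back to its start */
	printf("%#lx\n", new_cs_limit(0x3000, 0x1000, 0x3000, 0)); /* 0x1000 */
	return 0;
}
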
81457@@ -245,11 +294,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
81458 int error;
81459 int dirty_accountable = 0;
81460
81461+#ifdef CONFIG_PAX_SEGMEXEC
81462+ struct vm_area_struct *vma_m = NULL;
81463+ unsigned long start_m, end_m;
81464+
81465+ start_m = start + SEGMEXEC_TASK_SIZE;
81466+ end_m = end + SEGMEXEC_TASK_SIZE;
81467+#endif
81468+
81469 if (newflags == oldflags) {
81470 *pprev = vma;
81471 return 0;
81472 }
81473
81474+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
81475+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
81476+
81477+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
81478+ return -ENOMEM;
81479+
81480+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
81481+ return -ENOMEM;
81482+ }
81483+
81484 /*
81485 * If we make a private mapping writable we increase our commit;
81486 * but (without finer accounting) cannot reduce our commit if we
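
The new gap checks above reject an mprotect that would bring a readable/writable/executable range within sysctl_heap_stack_gap bytes of an adjacent growing stack. A small model of the test (it relies, as the kernel does, on vmas being ordered so next->vm_start >= end; values illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool violates_gap(unsigned long end, unsigned long next_start,
			 unsigned long gap)
{
	/* mirrors: gap > next->vm_start - end  =>  -ENOMEM */
	return gap > next_start - end;
}

int main(void)
{
	unsigned long gap = 64UL * 1024;	/* e.g. a 64KB guard */

	printf("%d\n", violates_gap(0x10000, 0x18000, gap)); /* 1: only 32KB left */
	printf("%d\n", violates_gap(0x10000, 0x30000, gap)); /* 0: 128KB gap */
	return 0;
}
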
81487@@ -266,6 +333,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
81488 }
81489 }
81490
81491+#ifdef CONFIG_PAX_SEGMEXEC
81492+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
81493+ if (start != vma->vm_start) {
81494+ error = split_vma(mm, vma, start, 1);
81495+ if (error)
81496+ goto fail;
81497+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
81498+ *pprev = (*pprev)->vm_next;
81499+ }
81500+
81501+ if (end != vma->vm_end) {
81502+ error = split_vma(mm, vma, end, 0);
81503+ if (error)
81504+ goto fail;
81505+ }
81506+
81507+ if (pax_find_mirror_vma(vma)) {
81508+ error = __do_munmap(mm, start_m, end_m - start_m);
81509+ if (error)
81510+ goto fail;
81511+ } else {
81512+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
81513+ if (!vma_m) {
81514+ error = -ENOMEM;
81515+ goto fail;
81516+ }
81517+ vma->vm_flags = newflags;
81518+ error = pax_mirror_vma(vma_m, vma);
81519+ if (error) {
81520+ vma->vm_flags = oldflags;
81521+ goto fail;
81522+ }
81523+ }
81524+ }
81525+#endif
81526+
81527 /*
81528 * First try to merge with previous and/or next vma.
81529 */
81530@@ -296,9 +399,21 @@ success:
81531 * vm_flags and vm_page_prot are protected by the mmap_sem
81532 * held in write mode.
81533 */
81534+
81535+#ifdef CONFIG_PAX_SEGMEXEC
81536+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
81537+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
81538+#endif
81539+
81540 vma->vm_flags = newflags;
81541+
81542+#ifdef CONFIG_PAX_MPROTECT
81543+ if (mm->binfmt && mm->binfmt->handle_mprotect)
81544+ mm->binfmt->handle_mprotect(vma, newflags);
81545+#endif
81546+
81547 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
81548- vm_get_page_prot(newflags));
81549+ vm_get_page_prot(vma->vm_flags));
81550
81551 if (vma_wants_writenotify(vma)) {
81552 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
81553@@ -337,6 +452,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
81554 end = start + len;
81555 if (end <= start)
81556 return -ENOMEM;
81557+
81558+#ifdef CONFIG_PAX_SEGMEXEC
81559+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
81560+ if (end > SEGMEXEC_TASK_SIZE)
81561+ return -EINVAL;
81562+ } else
81563+#endif
81564+
81565+ if (end > TASK_SIZE)
81566+ return -EINVAL;
81567+
81568 if (!arch_validate_prot(prot))
81569 return -EINVAL;
81570
81571@@ -344,7 +470,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
81572 /*
81573 * Does the application expect PROT_READ to imply PROT_EXEC:
81574 */
81575- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
81576+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
81577 prot |= PROT_EXEC;
81578
81579 vm_flags = calc_vm_prot_bits(prot);
81580@@ -376,6 +502,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
81581 if (start > vma->vm_start)
81582 prev = vma;
81583
81584+#ifdef CONFIG_PAX_MPROTECT
81585+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
81586+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
81587+#endif
81588+
81589 for (nstart = start ; ; ) {
81590 unsigned long newflags;
81591
81592@@ -386,6 +517,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
81593
81594 /* newflags >> 4 shift VM_MAY% in place of VM_% */
81595 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
81596+ if (prot & (PROT_WRITE | PROT_EXEC))
81597+ gr_log_rwxmprotect(vma->vm_file);
81598+
81599+ error = -EACCES;
81600+ goto out;
81601+ }
81602+
81603+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
81604 error = -EACCES;
81605 goto out;
81606 }
81607@@ -400,6 +539,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
81608 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
81609 if (error)
81610 goto out;
81611+
81612+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
81613+
81614 nstart = tmp;
81615
81616 if (nstart < prev->vm_end)
81617diff --git a/mm/mremap.c b/mm/mremap.c
81618index e1031e1..1f2a0a1 100644
81619--- a/mm/mremap.c
81620+++ b/mm/mremap.c
81621@@ -125,6 +125,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
81622 continue;
81623 pte = ptep_get_and_clear(mm, old_addr, old_pte);
81624 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
81625+
81626+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
81627+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
81628+ pte = pte_exprotect(pte);
81629+#endif
81630+
81631 set_pte_at(mm, new_addr, new_pte, pte);
81632 }
81633
81634@@ -319,6 +325,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
81635 if (is_vm_hugetlb_page(vma))
81636 goto Einval;
81637
81638+#ifdef CONFIG_PAX_SEGMEXEC
81639+ if (pax_find_mirror_vma(vma))
81640+ goto Einval;
81641+#endif
81642+
81643 /* We can't remap across vm area boundaries */
81644 if (old_len > vma->vm_end - addr)
81645 goto Efault;
81646@@ -375,20 +386,25 @@ static unsigned long mremap_to(unsigned long addr,
81647 unsigned long ret = -EINVAL;
81648 unsigned long charged = 0;
81649 unsigned long map_flags;
81650+ unsigned long pax_task_size = TASK_SIZE;
81651
81652 if (new_addr & ~PAGE_MASK)
81653 goto out;
81654
81655- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
81656+#ifdef CONFIG_PAX_SEGMEXEC
81657+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
81658+ pax_task_size = SEGMEXEC_TASK_SIZE;
81659+#endif
81660+
81661+ pax_task_size -= PAGE_SIZE;
81662+
81663+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
81664 goto out;
81665
81666 /* Check if the location we're moving into overlaps the
81667 * old location at all, and fail if it does.
81668 */
81669- if ((new_addr <= addr) && (new_addr+new_len) > addr)
81670- goto out;
81671-
81672- if ((addr <= new_addr) && (addr+old_len) > new_addr)
81673+ if (addr + old_len > new_addr && new_addr + new_len > addr)
81674 goto out;
81675
81676 ret = do_munmap(mm, new_addr, new_len);
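
The hunk above folds the two one-sided tests into the canonical interval-overlap check; for nonzero lengths the forms are equivalent. A quick standalone check of that equivalence:

#include <assert.h>
#include <stdio.h>

static int old_form(unsigned long addr, unsigned long old_len,
		    unsigned long new_addr, unsigned long new_len)
{
	if (new_addr <= addr && new_addr + new_len > addr)
		return 1;
	if (addr <= new_addr && addr + old_len > new_addr)
		return 1;
	return 0;
}

static int new_form(unsigned long addr, unsigned long old_len,
		    unsigned long new_addr, unsigned long new_len)
{
	return addr + old_len > new_addr && new_addr + new_len > addr;
}

int main(void)
{
	unsigned long a, na;

	/* exhaustive over a small grid of positive-length ranges */
	for (a = 0; a < 32; a++)
		for (na = 0; na < 32; na++)
			assert(old_form(a, 8, na, 8) == new_form(a, 8, na, 8));
	puts("both overlap tests agree");
	return 0;
}
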
81677@@ -456,6 +472,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
81678 struct vm_area_struct *vma;
81679 unsigned long ret = -EINVAL;
81680 unsigned long charged = 0;
81681+ unsigned long pax_task_size = TASK_SIZE;
81682
81683 down_write(&current->mm->mmap_sem);
81684
81685@@ -476,6 +493,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
81686 if (!new_len)
81687 goto out;
81688
81689+#ifdef CONFIG_PAX_SEGMEXEC
81690+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
81691+ pax_task_size = SEGMEXEC_TASK_SIZE;
81692+#endif
81693+
81694+ pax_task_size -= PAGE_SIZE;
81695+
81696+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
81697+ old_len > pax_task_size || addr > pax_task_size-old_len)
81698+ goto out;
81699+
81700 if (flags & MREMAP_FIXED) {
81701 if (flags & MREMAP_MAYMOVE)
81702 ret = mremap_to(addr, old_len, new_addr, new_len);
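
The bounds checks above are written as `addr > pax_task_size - new_len` rather than `addr + new_len > pax_task_size`: once the length is known to fit below the limit, the subtraction cannot underflow, while the addition could wrap on a hostile address. A demo with illustrative 32-bit values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t limit = 0xC0000000u;		/* assumed task size */
	uint32_t len = 0x2000u;
	uint32_t addr = 0xFFFFF000u;		/* hostile address */

	/* naive form wraps: 0xFFFFF000 + 0x2000 == 0x1000, so the check passes */
	printf("naive: %s\n", addr + len > limit ? "rejected" : "WRONGLY accepted");

	/* subtractive form cannot wrap once len <= limit */
	printf("safe:  %s\n", addr > limit - len ? "rejected" : "accepted");
	return 0;
}
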
81703@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
81704 addr + new_len);
81705 }
81706 ret = addr;
81707+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
81708 goto out;
81709 }
81710 }
81711@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
81712 goto out;
81713 }
81714
81715+ map_flags = vma->vm_flags;
81716 ret = move_vma(vma, addr, old_len, new_len, new_addr);
81717+ if (!(ret & ~PAGE_MASK)) {
81718+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
81719+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
81720+ }
81721 }
81722 out:
81723 if (ret & ~PAGE_MASK)
81724diff --git a/mm/nommu.c b/mm/nommu.c
81725index 79c3cac..4d357e0 100644
81726--- a/mm/nommu.c
81727+++ b/mm/nommu.c
81728@@ -62,7 +62,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
81729 int sysctl_overcommit_ratio = 50; /* default is 50% */
81730 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
81731 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
81732-int heap_stack_gap = 0;
81733
81734 atomic_long_t mmap_pages_allocated;
81735
81736@@ -839,15 +838,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
81737 EXPORT_SYMBOL(find_vma);
81738
81739 /*
81740- * find a VMA
81741- * - we don't extend stack VMAs under NOMMU conditions
81742- */
81743-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
81744-{
81745- return find_vma(mm, addr);
81746-}
81747-
81748-/*
81749 * expand a stack to a given address
81750 * - not supported under NOMMU conditions
81751 */
81752@@ -1555,6 +1545,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
81753
81754 /* most fields are the same, copy all, and then fixup */
81755 *new = *vma;
81756+ INIT_LIST_HEAD(&new->anon_vma_chain);
81757 *region = *vma->vm_region;
81758 new->vm_region = region;
81759
81760diff --git a/mm/page-writeback.c b/mm/page-writeback.c
81761index 0713bfb..e3774e0 100644
81762--- a/mm/page-writeback.c
81763+++ b/mm/page-writeback.c
81764@@ -1630,7 +1630,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
81765 }
81766 }
81767
81768-static struct notifier_block __cpuinitdata ratelimit_nb = {
81769+static struct notifier_block ratelimit_nb = {
81770 .notifier_call = ratelimit_handler,
81771 .next = NULL,
81772 };
81773diff --git a/mm/page_alloc.c b/mm/page_alloc.c
81774index 6a83cd3..bc2dcb6 100644
81775--- a/mm/page_alloc.c
81776+++ b/mm/page_alloc.c
81777@@ -338,7 +338,7 @@ out:
81778 * This usage means that zero-order pages may not be compound.
81779 */
81780
81781-static void free_compound_page(struct page *page)
81782+void free_compound_page(struct page *page)
81783 {
81784 __free_pages_ok(page, compound_order(page));
81785 }
81786@@ -693,6 +693,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
81787 int i;
81788 int bad = 0;
81789
81790+#ifdef CONFIG_PAX_MEMORY_SANITIZE
81791+ unsigned long index = 1UL << order;
81792+#endif
81793+
81794 trace_mm_page_free(page, order);
81795 kmemcheck_free_shadow(page, order);
81796
81797@@ -708,6 +712,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
81798 debug_check_no_obj_freed(page_address(page),
81799 PAGE_SIZE << order);
81800 }
81801+
81802+#ifdef CONFIG_PAX_MEMORY_SANITIZE
81803+ for (; index; --index)
81804+ sanitize_highpage(page + index - 1);
81805+#endif
81806+
81807 arch_free_page(page, order);
81808 kernel_map_pages(page, 1 << order, 0);
81809
81810@@ -861,8 +871,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
81811 arch_alloc_page(page, order);
81812 kernel_map_pages(page, 1 << order, 1);
81813
81814+#ifndef CONFIG_PAX_MEMORY_SANITIZE
81815 if (gfp_flags & __GFP_ZERO)
81816 prep_zero_page(page, order, gfp_flags);
81817+#endif
81818
81819 if (order && (gfp_flags & __GFP_COMP))
81820 prep_compound_page(page, order);
81821@@ -3752,7 +3764,13 @@ static int pageblock_is_reserved(unsigned long start_pfn, unsigned long end_pfn)
81822 unsigned long pfn;
81823
81824 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
81825+#ifdef CONFIG_X86_32
81826+ /* boot failures in VMware 8 on 32bit vanilla since
81827+ this change */
81828+ if (!pfn_valid(pfn) || PageReserved(pfn_to_page(pfn)))
81829+#else
81830 if (!pfn_valid_within(pfn) || PageReserved(pfn_to_page(pfn)))
81831+#endif
81832 return 1;
81833 }
81834 return 0;
81835diff --git a/mm/percpu.c b/mm/percpu.c
81836index 8c8e08f..73a5cda 100644
81837--- a/mm/percpu.c
81838+++ b/mm/percpu.c
81839@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
81840 static unsigned int pcpu_high_unit_cpu __read_mostly;
81841
81842 /* the address of the first chunk which starts with the kernel static area */
81843-void *pcpu_base_addr __read_mostly;
81844+void *pcpu_base_addr __read_only;
81845 EXPORT_SYMBOL_GPL(pcpu_base_addr);
81846
81847 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
81848diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
81849index 926b466..b23df53 100644
81850--- a/mm/process_vm_access.c
81851+++ b/mm/process_vm_access.c
81852@@ -13,6 +13,7 @@
81853 #include <linux/uio.h>
81854 #include <linux/sched.h>
81855 #include <linux/highmem.h>
81856+#include <linux/security.h>
81857 #include <linux/ptrace.h>
81858 #include <linux/slab.h>
81859 #include <linux/syscalls.h>
81860@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
81861 size_t iov_l_curr_offset = 0;
81862 ssize_t iov_len;
81863
81864+ return -ENOSYS; // PaX: until properly audited
81865+
81866 /*
81867 * Work out how many pages of struct pages we're going to need
81868 * when eventually calling get_user_pages
81869 */
81870 for (i = 0; i < riovcnt; i++) {
81871 iov_len = rvec[i].iov_len;
81872- if (iov_len > 0) {
81873- nr_pages_iov = ((unsigned long)rvec[i].iov_base
81874- + iov_len)
81875- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
81876- / PAGE_SIZE + 1;
81877- nr_pages = max(nr_pages, nr_pages_iov);
81878- }
81879+ if (iov_len <= 0)
81880+ continue;
81881+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
81882+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
81883+ nr_pages = max(nr_pages, nr_pages_iov);
81884 }
81885
81886 if (nr_pages == 0)
81887@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
81888 goto free_proc_pages;
81889 }
81890
81891+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
81892+ rc = -EPERM;
81893+ goto put_task_struct;
81894+ }
81895+
81896 mm = mm_access(task, PTRACE_MODE_ATTACH);
81897 if (!mm || IS_ERR(mm)) {
81898 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
81899diff --git a/mm/rmap.c b/mm/rmap.c
81900index 2c78f8c..9e9c624 100644
81901--- a/mm/rmap.c
81902+++ b/mm/rmap.c
81903@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
81904 struct anon_vma *anon_vma = vma->anon_vma;
81905 struct anon_vma_chain *avc;
81906
81907+#ifdef CONFIG_PAX_SEGMEXEC
81908+ struct anon_vma_chain *avc_m = NULL;
81909+#endif
81910+
81911 might_sleep();
81912 if (unlikely(!anon_vma)) {
81913 struct mm_struct *mm = vma->vm_mm;
81914@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
81915 if (!avc)
81916 goto out_enomem;
81917
81918+#ifdef CONFIG_PAX_SEGMEXEC
81919+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
81920+ if (!avc_m)
81921+ goto out_enomem_free_avc;
81922+#endif
81923+
81924 anon_vma = find_mergeable_anon_vma(vma);
81925 allocated = NULL;
81926 if (!anon_vma) {
81927@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
81928 /* page_table_lock to protect against threads */
81929 spin_lock(&mm->page_table_lock);
81930 if (likely(!vma->anon_vma)) {
81931+
81932+#ifdef CONFIG_PAX_SEGMEXEC
81933+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
81934+
81935+ if (vma_m) {
81936+ BUG_ON(vma_m->anon_vma);
81937+ vma_m->anon_vma = anon_vma;
81938+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
81939+ avc_m = NULL;
81940+ }
81941+#endif
81942+
81943 vma->anon_vma = anon_vma;
81944 anon_vma_chain_link(vma, avc, anon_vma);
81945 allocated = NULL;
81946@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
81947
81948 if (unlikely(allocated))
81949 put_anon_vma(allocated);
81950+
81951+#ifdef CONFIG_PAX_SEGMEXEC
81952+ if (unlikely(avc_m))
81953+ anon_vma_chain_free(avc_m);
81954+#endif
81955+
81956 if (unlikely(avc))
81957 anon_vma_chain_free(avc);
81958 }
81959 return 0;
81960
81961 out_enomem_free_avc:
81962+
81963+#ifdef CONFIG_PAX_SEGMEXEC
81964+ if (avc_m)
81965+ anon_vma_chain_free(avc_m);
81966+#endif
81967+
81968 anon_vma_chain_free(avc);
81969 out_enomem:
81970 return -ENOMEM;
81971@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
81972 * Attach the anon_vmas from src to dst.
81973 * Returns 0 on success, -ENOMEM on failure.
81974 */
81975-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
81976+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
81977 {
81978 struct anon_vma_chain *avc, *pavc;
81979 struct anon_vma *root = NULL;
81980@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
81981 * the corresponding VMA in the parent process is attached to.
81982 * Returns 0 on success, non-zero on failure.
81983 */
81984-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
81985+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
81986 {
81987 struct anon_vma_chain *avc;
81988 struct anon_vma *anon_vma;
81989diff --git a/mm/shmem.c b/mm/shmem.c
81990index efd0b3a..994b702 100644
81991--- a/mm/shmem.c
81992+++ b/mm/shmem.c
81993@@ -31,7 +31,7 @@
81994 #include <linux/export.h>
81995 #include <linux/swap.h>
81996
81997-static struct vfsmount *shm_mnt;
81998+struct vfsmount *shm_mnt;
81999
82000 #ifdef CONFIG_SHMEM
82001 /*
82002@@ -75,7 +75,7 @@ static struct vfsmount *shm_mnt;
82003 #define BOGO_DIRENT_SIZE 20
82004
82005 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
82006-#define SHORT_SYMLINK_LEN 128
82007+#define SHORT_SYMLINK_LEN 64
82008
82009 /*
82010 * shmem_fallocate and shmem_writepage communicate via inode->i_private
82011@@ -2202,6 +2202,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
82012 static int shmem_xattr_validate(const char *name)
82013 {
82014 struct { const char *prefix; size_t len; } arr[] = {
82015+
82016+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
82017+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
82018+#endif
82019+
82020 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
82021 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
82022 };
82023@@ -2257,6 +2262,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
82024 if (err)
82025 return err;
82026
82027+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
82028+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
82029+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
82030+ return -EOPNOTSUPP;
82031+ if (size > 8)
82032+ return -EINVAL;
82033+ }
82034+#endif
82035+
82036 return simple_xattr_set(&info->xattrs, name, value, size, flags);
82037 }
82038
82039@@ -2562,8 +2576,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
82040 int err = -ENOMEM;
82041
82042 /* Round up to L1_CACHE_BYTES to resist false sharing */
82043- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
82044- L1_CACHE_BYTES), GFP_KERNEL);
82045+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
82046 if (!sbinfo)
82047 return -ENOMEM;
82048
82049diff --git a/mm/slab.c b/mm/slab.c
82050index e7667a3..b62c169 100644
82051--- a/mm/slab.c
82052+++ b/mm/slab.c
82053@@ -306,7 +306,7 @@ struct kmem_list3 {
82054 * Need this for bootstrapping a per node allocator.
82055 */
82056 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
82057-static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
82058+static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
82059 #define CACHE_CACHE 0
82060 #define SIZE_AC MAX_NUMNODES
82061 #define SIZE_L3 (2 * MAX_NUMNODES)
82062@@ -407,10 +407,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
82063 if ((x)->max_freeable < i) \
82064 (x)->max_freeable = i; \
82065 } while (0)
82066-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
82067-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
82068-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
82069-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
82070+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
82071+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
82072+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
82073+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
82074 #else
82075 #define STATS_INC_ACTIVE(x) do { } while (0)
82076 #define STATS_DEC_ACTIVE(x) do { } while (0)
82077@@ -518,7 +518,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
82078 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
82079 */
82080 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
82081- const struct slab *slab, void *obj)
82082+ const struct slab *slab, const void *obj)
82083 {
82084 u32 offset = (obj - slab->s_mem);
82085 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
82086@@ -539,12 +539,13 @@ EXPORT_SYMBOL(malloc_sizes);
82087 struct cache_names {
82088 char *name;
82089 char *name_dma;
82090+ char *name_usercopy;
82091 };
82092
82093 static struct cache_names __initdata cache_names[] = {
82094-#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
82095+#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)", .name_usercopy = "size-" #x "(USERCOPY)" },
82096 #include <linux/kmalloc_sizes.h>
82097- {NULL,}
82098+ {NULL}
82099 #undef CACHE
82100 };
82101
82102@@ -729,6 +730,12 @@ static inline struct kmem_cache *__find_general_cachep(size_t size,
82103 if (unlikely(gfpflags & GFP_DMA))
82104 return csizep->cs_dmacachep;
82105 #endif
82106+
82107+#ifdef CONFIG_PAX_USERCOPY_SLABS
82108+ if (unlikely(gfpflags & GFP_USERCOPY))
82109+ return csizep->cs_usercopycachep;
82110+#endif
82111+
82112 return csizep->cs_cachep;
82113 }
82114
82115@@ -1482,7 +1489,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
82116 return notifier_from_errno(err);
82117 }
82118
82119-static struct notifier_block __cpuinitdata cpucache_notifier = {
82120+static struct notifier_block cpucache_notifier = {
82121 &cpuup_callback, NULL, 0
82122 };
82123
82124@@ -1667,12 +1674,12 @@ void __init kmem_cache_init(void)
82125 */
82126
82127 sizes[INDEX_AC].cs_cachep = create_kmalloc_cache(names[INDEX_AC].name,
82128- sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS);
82129+ sizes[INDEX_AC].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
82130
82131 if (INDEX_AC != INDEX_L3)
82132 sizes[INDEX_L3].cs_cachep =
82133 create_kmalloc_cache(names[INDEX_L3].name,
82134- sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS);
82135+ sizes[INDEX_L3].cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
82136
82137 slab_early_init = 0;
82138
82139@@ -1686,13 +1693,20 @@ void __init kmem_cache_init(void)
82140 */
82141 if (!sizes->cs_cachep)
82142 sizes->cs_cachep = create_kmalloc_cache(names->name,
82143- sizes->cs_size, ARCH_KMALLOC_FLAGS);
82144+ sizes->cs_size, ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
82145
82146 #ifdef CONFIG_ZONE_DMA
82147 sizes->cs_dmacachep = create_kmalloc_cache(
82148 names->name_dma, sizes->cs_size,
82149 SLAB_CACHE_DMA|ARCH_KMALLOC_FLAGS);
82150 #endif
82151+
82152+#ifdef CONFIG_PAX_USERCOPY_SLABS
82153+ sizes->cs_usercopycachep = create_kmalloc_cache(
82154+ names->name_usercopy, sizes->cs_size,
82155+ ARCH_KMALLOC_FLAGS|SLAB_USERCOPY);
82156+#endif
82157+
82158 sizes++;
82159 names++;
82160 }
82161@@ -4365,10 +4379,10 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
82162 }
82163 /* cpu stats */
82164 {
82165- unsigned long allochit = atomic_read(&cachep->allochit);
82166- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
82167- unsigned long freehit = atomic_read(&cachep->freehit);
82168- unsigned long freemiss = atomic_read(&cachep->freemiss);
82169+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
82170+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
82171+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
82172+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
82173
82174 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
82175 allochit, allocmiss, freehit, freemiss);
82176@@ -4600,13 +4614,71 @@ static const struct file_operations proc_slabstats_operations = {
82177 static int __init slab_proc_init(void)
82178 {
82179 #ifdef CONFIG_DEBUG_SLAB_LEAK
82180- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
82181+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
82182 #endif
82183 return 0;
82184 }
82185 module_init(slab_proc_init);
82186 #endif
82187
82188+bool is_usercopy_object(const void *ptr)
82189+{
82190+ struct page *page;
82191+ struct kmem_cache *cachep;
82192+
82193+ if (ZERO_OR_NULL_PTR(ptr))
82194+ return false;
82195+
82196+ if (!slab_is_available())
82197+ return false;
82198+
82199+ if (!virt_addr_valid(ptr))
82200+ return false;
82201+
82202+ page = virt_to_head_page(ptr);
82203+
82204+ if (!PageSlab(page))
82205+ return false;
82206+
82207+ cachep = page->slab_cache;
82208+ return cachep->flags & SLAB_USERCOPY;
82209+}
82210+
82211+#ifdef CONFIG_PAX_USERCOPY
82212+const char *check_heap_object(const void *ptr, unsigned long n)
82213+{
82214+ struct page *page;
82215+ struct kmem_cache *cachep;
82216+ struct slab *slabp;
82217+ unsigned int objnr;
82218+ unsigned long offset;
82219+
82220+ if (ZERO_OR_NULL_PTR(ptr))
82221+ return "<null>";
82222+
82223+ if (!virt_addr_valid(ptr))
82224+ return NULL;
82225+
82226+ page = virt_to_head_page(ptr);
82227+
82228+ if (!PageSlab(page))
82229+ return NULL;
82230+
82231+ cachep = page->slab_cache;
82232+ if (!(cachep->flags & SLAB_USERCOPY))
82233+ return cachep->name;
82234+
82235+ slabp = page->slab_page;
82236+ objnr = obj_to_index(cachep, slabp, ptr);
82237+ BUG_ON(objnr >= cachep->num);
82238+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
82239+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
82240+ return NULL;
82241+
82242+ return cachep->name;
82243+}
82244+#endif
82245+
82246 /**
82247 * ksize - get the actual amount of memory allocated for a given object
82248 * @objp: Pointer to the object
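
The final test in the SLAB check_heap_object() above reduces to pure interval arithmetic: a copy of n bytes starting at `offset` within an object may not spill past object_size. A standalone model of just that rule (values illustrative):

#include <stdio.h>

static const char *check_range(unsigned long offset, unsigned long n,
			       unsigned long object_size)
{
	/* mirrors: offset <= object_size && n <= object_size - offset */
	if (offset <= object_size && n <= object_size - offset)
		return NULL;			/* copy stays in bounds */
	return "demo-cache";			/* name of the offending cache */
}

int main(void)
{
	printf("%s\n", check_range(8, 24, 32) ? "blocked" : "ok");  /* ok */
	printf("%s\n", check_range(8, 32, 32) ? "blocked" : "ok");  /* blocked */
	return 0;
}
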
82249diff --git a/mm/slab.h b/mm/slab.h
82250index 34a98d6..73633d1 100644
82251--- a/mm/slab.h
82252+++ b/mm/slab.h
82253@@ -58,7 +58,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
82254
82255 /* Legal flag mask for kmem_cache_create(), for various configurations */
82256 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
82257- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
82258+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | SLAB_USERCOPY)
82259
82260 #if defined(CONFIG_DEBUG_SLAB)
82261 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
82262@@ -220,6 +220,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
82263 return s;
82264
82265 page = virt_to_head_page(x);
82266+
82267+ BUG_ON(!PageSlab(page));
82268+
82269 cachep = page->slab_cache;
82270 if (slab_equal_or_root(cachep, s))
82271 return cachep;
82272diff --git a/mm/slab_common.c b/mm/slab_common.c
82273index 3f3cd97..93b0236 100644
82274--- a/mm/slab_common.c
82275+++ b/mm/slab_common.c
82276@@ -22,7 +22,7 @@
82277
82278 #include "slab.h"
82279
82280-enum slab_state slab_state;
82281+enum slab_state slab_state __read_only;
82282 LIST_HEAD(slab_caches);
82283 DEFINE_MUTEX(slab_mutex);
82284 struct kmem_cache *kmem_cache;
82285@@ -209,7 +209,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
82286
82287 err = __kmem_cache_create(s, flags);
82288 if (!err) {
82289- s->refcount = 1;
82290+ atomic_set(&s->refcount, 1);
82291 list_add(&s->list, &slab_caches);
82292 memcg_cache_list_add(memcg, s);
82293 } else {
82294@@ -255,8 +255,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
82295
82296 get_online_cpus();
82297 mutex_lock(&slab_mutex);
82298- s->refcount--;
82299- if (!s->refcount) {
82300+ if (atomic_dec_and_test(&s->refcount)) {
82301 list_del(&s->list);
82302
82303 if (!__kmem_cache_shutdown(s)) {
82304@@ -302,7 +301,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
82305 panic("Creation of kmalloc slab %s size=%zd failed. Reason %d\n",
82306 name, size, err);
82307
82308- s->refcount = -1; /* Exempt from merging for now */
82309+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
82310 }
82311
82312 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
82313@@ -315,7 +314,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
82314
82315 create_boot_cache(s, name, size, flags);
82316 list_add(&s->list, &slab_caches);
82317- s->refcount = 1;
82318+ atomic_set(&s->refcount, 1);
82319 return s;
82320 }
82321
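
The refcount conversion above replaces the plain `s->refcount--; if (!s->refcount)` pair with atomic_dec_and_test(), so of several racing destroyers exactly one observes the count hitting zero. A minimal C11 model of that pattern:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcount;

/* returns 1 for exactly one caller: the one that dropped the last reference */
static int put_ref(void)
{
	return atomic_fetch_sub(&refcount, 1) == 1;
}

int main(void)
{
	atomic_store(&refcount, 2);
	printf("%d\n", put_ref());	/* 0: a reference remains */
	printf("%d\n", put_ref());	/* 1: last reference dropped */
	return 0;
}
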
82322diff --git a/mm/slob.c b/mm/slob.c
82323index a99fdf7..f5b6577 100644
82324--- a/mm/slob.c
82325+++ b/mm/slob.c
82326@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
82327 /*
82328 * Return the size of a slob block.
82329 */
82330-static slobidx_t slob_units(slob_t *s)
82331+static slobidx_t slob_units(const slob_t *s)
82332 {
82333 if (s->units > 0)
82334 return s->units;
82335@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
82336 /*
82337 * Return the next free slob block pointer after this one.
82338 */
82339-static slob_t *slob_next(slob_t *s)
82340+static slob_t *slob_next(const slob_t *s)
82341 {
82342 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
82343 slobidx_t next;
82344@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
82345 /*
82346 * Returns true if s is the last free block in its page.
82347 */
82348-static int slob_last(slob_t *s)
82349+static int slob_last(const slob_t *s)
82350 {
82351 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
82352 }
82353
82354-static void *slob_new_pages(gfp_t gfp, int order, int node)
82355+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
82356 {
82357- void *page;
82358+ struct page *page;
82359
82360 #ifdef CONFIG_NUMA
82361 if (node != NUMA_NO_NODE)
82362@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
82363 if (!page)
82364 return NULL;
82365
82366- return page_address(page);
82367+ __SetPageSlab(page);
82368+ return page;
82369 }
82370
82371-static void slob_free_pages(void *b, int order)
82372+static void slob_free_pages(struct page *sp, int order)
82373 {
82374 if (current->reclaim_state)
82375 current->reclaim_state->reclaimed_slab += 1 << order;
82376- free_pages((unsigned long)b, order);
82377+ __ClearPageSlab(sp);
82378+ reset_page_mapcount(sp);
82379+ sp->private = 0;
82380+ __free_pages(sp, order);
82381 }
82382
82383 /*
82384@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
82385
82386 /* Not enough space: must allocate a new page */
82387 if (!b) {
82388- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
82389- if (!b)
82390+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
82391+ if (!sp)
82392 return NULL;
82393- sp = virt_to_page(b);
82394- __SetPageSlab(sp);
82395+ b = page_address(sp);
82396
82397 spin_lock_irqsave(&slob_lock, flags);
82398 sp->units = SLOB_UNITS(PAGE_SIZE);
82399 sp->freelist = b;
82400+ sp->private = 0;
82401 INIT_LIST_HEAD(&sp->list);
82402 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
82403 set_slob_page_free(sp, slob_list);
82404@@ -359,9 +363,7 @@ static void slob_free(void *block, int size)
82405 if (slob_page_free(sp))
82406 clear_slob_page_free(sp);
82407 spin_unlock_irqrestore(&slob_lock, flags);
82408- __ClearPageSlab(sp);
82409- reset_page_mapcount(sp);
82410- slob_free_pages(b, 0);
82411+ slob_free_pages(sp, 0);
82412 return;
82413 }
82414
82415@@ -424,11 +426,10 @@ out:
82416 */
82417
82418 static __always_inline void *
82419-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
82420+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
82421 {
82422- unsigned int *m;
82423- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
82424- void *ret;
82425+ slob_t *m;
82426+ void *ret = NULL;
82427
82428 gfp &= gfp_allowed_mask;
82429
82430@@ -442,23 +443,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
82431
82432 if (!m)
82433 return NULL;
82434- *m = size;
82435+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
82436+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
82437+ m[0].units = size;
82438+ m[1].units = align;
82439 ret = (void *)m + align;
82440
82441 trace_kmalloc_node(caller, ret,
82442 size, size + align, gfp, node);
82443 } else {
82444 unsigned int order = get_order(size);
82445+ struct page *page;
82446
82447 if (likely(order))
82448 gfp |= __GFP_COMP;
82449- ret = slob_new_pages(gfp, order, node);
82450+ page = slob_new_pages(gfp, order, node);
82451+ if (page) {
82452+ ret = page_address(page);
82453+ page->private = size;
82454+ }
82455
82456 trace_kmalloc_node(caller, ret,
82457 size, PAGE_SIZE << order, gfp, node);
82458 }
82459
82460- kmemleak_alloc(ret, size, 1, gfp);
82461+ return ret;
82462+}
82463+
82464+static __always_inline void *
82465+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
82466+{
82467+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
82468+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
82469+
82470+ if (!ZERO_OR_NULL_PTR(ret))
82471+ kmemleak_alloc(ret, size, 1, gfp);
82472 return ret;
82473 }
82474
82475@@ -494,33 +513,110 @@ void kfree(const void *block)
82476 kmemleak_free(block);
82477
82478 sp = virt_to_page(block);
82479- if (PageSlab(sp)) {
82480+ VM_BUG_ON(!PageSlab(sp));
82481+ if (!sp->private) {
82482 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
82483- unsigned int *m = (unsigned int *)(block - align);
82484- slob_free(m, *m + align);
82485- } else
82486+ slob_t *m = (slob_t *)(block - align);
82487+ slob_free(m, m[0].units + align);
82488+ } else {
82489+ __ClearPageSlab(sp);
82490+ reset_page_mapcount(sp);
82491+ sp->private = 0;
82492 __free_pages(sp, compound_order(sp));
82493+ }
82494 }
82495 EXPORT_SYMBOL(kfree);
82496
82497+bool is_usercopy_object(const void *ptr)
82498+{
82499+ if (!slab_is_available())
82500+ return false;
82501+
82502+ // PAX: TODO
82503+
82504+ return false;
82505+}
82506+
82507+#ifdef CONFIG_PAX_USERCOPY
82508+const char *check_heap_object(const void *ptr, unsigned long n)
82509+{
82510+ struct page *page;
82511+ const slob_t *free;
82512+ const void *base;
82513+ unsigned long flags;
82514+
82515+ if (ZERO_OR_NULL_PTR(ptr))
82516+ return "<null>";
82517+
82518+ if (!virt_addr_valid(ptr))
82519+ return NULL;
82520+
82521+ page = virt_to_head_page(ptr);
82522+ if (!PageSlab(page))
82523+ return NULL;
82524+
82525+ if (page->private) {
82526+ base = page;
82527+ if (base <= ptr && n <= page->private - (ptr - base))
82528+ return NULL;
82529+ return "<slob>";
82530+ }
82531+
82532 +	/* tricky double walk to find the enclosing chunk */
82533+ spin_lock_irqsave(&slob_lock, flags);
82534+ base = (void *)((unsigned long)ptr & PAGE_MASK);
82535+ free = page->freelist;
82536+
82537+ while (!slob_last(free) && (void *)free <= ptr) {
82538+ base = free + slob_units(free);
82539+ free = slob_next(free);
82540+ }
82541+
82542+ while (base < (void *)free) {
82543+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
82544+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
82545+ int offset;
82546+
82547+ if (ptr < base + align)
82548+ break;
82549+
82550+ offset = ptr - base - align;
82551+ if (offset >= m) {
82552+ base += size;
82553+ continue;
82554+ }
82555+
82556+ if (n > m - offset)
82557+ break;
82558+
82559+ spin_unlock_irqrestore(&slob_lock, flags);
82560+ return NULL;
82561+ }
82562+
82563+ spin_unlock_irqrestore(&slob_lock, flags);
82564+ return "<slob>";
82565+}
82566+#endif
82567+
82568 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
82569 size_t ksize(const void *block)
82570 {
82571 struct page *sp;
82572 int align;
82573- unsigned int *m;
82574+ slob_t *m;
82575
82576 BUG_ON(!block);
82577 if (unlikely(block == ZERO_SIZE_PTR))
82578 return 0;
82579
82580 sp = virt_to_page(block);
82581- if (unlikely(!PageSlab(sp)))
82582- return PAGE_SIZE << compound_order(sp);
82583+ VM_BUG_ON(!PageSlab(sp));
82584+ if (sp->private)
82585+ return sp->private;
82586
82587 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
82588- m = (unsigned int *)(block - align);
82589- return SLOB_UNITS(*m) * SLOB_UNIT;
82590+ m = (slob_t *)(block - align);
82591+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
82592 }
82593 EXPORT_SYMBOL(ksize);
82594
82595@@ -536,23 +632,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
82596
82597 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
82598 {
82599- void *b;
82600+ void *b = NULL;
82601
82602 flags &= gfp_allowed_mask;
82603
82604 lockdep_trace_alloc(flags);
82605
82606+#ifdef CONFIG_PAX_USERCOPY_SLABS
82607+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
82608+#else
82609 if (c->size < PAGE_SIZE) {
82610 b = slob_alloc(c->size, flags, c->align, node);
82611 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
82612 SLOB_UNITS(c->size) * SLOB_UNIT,
82613 flags, node);
82614 } else {
82615- b = slob_new_pages(flags, get_order(c->size), node);
82616+ struct page *sp;
82617+
82618+ sp = slob_new_pages(flags, get_order(c->size), node);
82619+ if (sp) {
82620+ b = page_address(sp);
82621+ sp->private = c->size;
82622+ }
82623 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
82624 PAGE_SIZE << get_order(c->size),
82625 flags, node);
82626 }
82627+#endif
82628
82629 if (c->ctor)
82630 c->ctor(b);
82631@@ -564,10 +670,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
82632
82633 static void __kmem_cache_free(void *b, int size)
82634 {
82635- if (size < PAGE_SIZE)
82636+ struct page *sp;
82637+
82638+ sp = virt_to_page(b);
82639+ BUG_ON(!PageSlab(sp));
82640+ if (!sp->private)
82641 slob_free(b, size);
82642 else
82643- slob_free_pages(b, get_order(size));
82644+ slob_free_pages(sp, get_order(size));
82645 }
82646
82647 static void kmem_rcu_free(struct rcu_head *head)
82648@@ -580,17 +690,31 @@ static void kmem_rcu_free(struct rcu_head *head)
82649
82650 void kmem_cache_free(struct kmem_cache *c, void *b)
82651 {
82652+ int size = c->size;
82653+
82654+#ifdef CONFIG_PAX_USERCOPY_SLABS
82655+ if (size + c->align < PAGE_SIZE) {
82656+ size += c->align;
82657+ b -= c->align;
82658+ }
82659+#endif
82660+
82661 kmemleak_free_recursive(b, c->flags);
82662 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
82663 struct slob_rcu *slob_rcu;
82664- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
82665- slob_rcu->size = c->size;
82666+ slob_rcu = b + (size - sizeof(struct slob_rcu));
82667+ slob_rcu->size = size;
82668 call_rcu(&slob_rcu->head, kmem_rcu_free);
82669 } else {
82670- __kmem_cache_free(b, c->size);
82671+ __kmem_cache_free(b, size);
82672 }
82673
82674+#ifdef CONFIG_PAX_USERCOPY_SLABS
82675+ trace_kfree(_RET_IP_, b);
82676+#else
82677 trace_kmem_cache_free(_RET_IP_, b);
82678+#endif
82679+
82680 }
82681 EXPORT_SYMBOL(kmem_cache_free);
82682
82683diff --git a/mm/slub.c b/mm/slub.c
82684index ba2ca53..00b1f4e 100644
82685--- a/mm/slub.c
82686+++ b/mm/slub.c
82687@@ -197,7 +197,7 @@ struct track {
82688
82689 enum track_item { TRACK_ALLOC, TRACK_FREE };
82690
82691-#ifdef CONFIG_SYSFS
82692+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
82693 static int sysfs_slab_add(struct kmem_cache *);
82694 static int sysfs_slab_alias(struct kmem_cache *, const char *);
82695 static void sysfs_slab_remove(struct kmem_cache *);
82696@@ -518,7 +518,7 @@ static void print_track(const char *s, struct track *t)
82697 if (!t->addr)
82698 return;
82699
82700- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
82701+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
82702 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
82703 #ifdef CONFIG_STACKTRACE
82704 {
82705@@ -2653,7 +2653,7 @@ static int slub_min_objects;
82706 * Merge control. If this is set then no merging of slab caches will occur.
82707 * (Could be removed. This was introduced to pacify the merge skeptics.)
82708 */
82709-static int slub_nomerge;
82710+static int slub_nomerge = 1;
82711
82712 /*
82713 * Calculate the order of allocation given an slab object size.
82714@@ -3181,6 +3181,10 @@ EXPORT_SYMBOL(kmalloc_caches);
82715 static struct kmem_cache *kmalloc_dma_caches[SLUB_PAGE_SHIFT];
82716 #endif
82717
82718+#ifdef CONFIG_PAX_USERCOPY_SLABS
82719+static struct kmem_cache *kmalloc_usercopy_caches[SLUB_PAGE_SHIFT];
82720+#endif
82721+
82722 static int __init setup_slub_min_order(char *str)
82723 {
82724 get_option(&str, &slub_min_order);
82725@@ -3272,6 +3276,13 @@ static struct kmem_cache *get_slab(size_t size, gfp_t flags)
82726 return kmalloc_dma_caches[index];
82727
82728 #endif
82729+
82730+#ifdef CONFIG_PAX_USERCOPY_SLABS
82731+ if (flags & SLAB_USERCOPY)
82732+ return kmalloc_usercopy_caches[index];
82733+
82734+#endif
82735+
82736 return kmalloc_caches[index];
82737 }
82738
82739@@ -3340,6 +3351,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
82740 EXPORT_SYMBOL(__kmalloc_node);
82741 #endif
82742
82743+bool is_usercopy_object(const void *ptr)
82744+{
82745+ struct page *page;
82746+ struct kmem_cache *s;
82747+
82748+ if (ZERO_OR_NULL_PTR(ptr))
82749+ return false;
82750+
82751+ if (!slab_is_available())
82752+ return false;
82753+
82754+ if (!virt_addr_valid(ptr))
82755+ return false;
82756+
82757+ page = virt_to_head_page(ptr);
82758+
82759+ if (!PageSlab(page))
82760+ return false;
82761+
82762+ s = page->slab_cache;
82763+ return s->flags & SLAB_USERCOPY;
82764+}
82765+
82766+#ifdef CONFIG_PAX_USERCOPY
82767+const char *check_heap_object(const void *ptr, unsigned long n)
82768+{
82769+ struct page *page;
82770+ struct kmem_cache *s;
82771+ unsigned long offset;
82772+
82773+ if (ZERO_OR_NULL_PTR(ptr))
82774+ return "<null>";
82775+
82776+ if (!virt_addr_valid(ptr))
82777+ return NULL;
82778+
82779+ page = virt_to_head_page(ptr);
82780+
82781+ if (!PageSlab(page))
82782+ return NULL;
82783+
82784+ s = page->slab_cache;
82785+ if (!(s->flags & SLAB_USERCOPY))
82786+ return s->name;
82787+
82788+ offset = (ptr - page_address(page)) % s->size;
82789+ if (offset <= s->object_size && n <= s->object_size - offset)
82790+ return NULL;
82791+
82792+ return s->name;
82793+}
82794+#endif
82795+
82796 size_t ksize(const void *object)
82797 {
82798 struct page *page;
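
Unlike the SLAB variant, the SLUB check above has no explicit object index: objects are laid out at a fixed stride, so `(ptr - page_address(page)) % s->size` recovers the offset into the enclosing slot (in real SLUB the stride includes per-object metadata; the values here are illustrative):

#include <stdio.h>

int main(void)
{
	unsigned long page_start = 0x10000, stride = 256, object_size = 256;
	unsigned long ptr = page_start + 3 * stride + 40;	/* 40 bytes into object 3 */
	unsigned long offset = (ptr - page_start) % stride;

	/* same bounds rule as the SLAB variant */
	printf("offset=%lu, copy of 100 bytes: %s\n", offset,
	       offset <= object_size && 100 <= object_size - offset
	       ? "ok" : "blocked");
	return 0;
}
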
82799@@ -3712,17 +3776,17 @@ void __init kmem_cache_init(void)
82800
82801 /* Caches that are not of the two-to-the-power-of size */
82802 if (KMALLOC_MIN_SIZE <= 32) {
82803- kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
82804+ kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
82805 caches++;
82806 }
82807
82808 if (KMALLOC_MIN_SIZE <= 64) {
82809- kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
82810+ kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
82811 caches++;
82812 }
82813
82814 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
82815- kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
82816+ kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
82817 caches++;
82818 }
82819
82820@@ -3764,6 +3828,22 @@ void __init kmem_cache_init(void)
82821 }
82822 }
82823 #endif
82824+
82825+#ifdef CONFIG_PAX_USERCOPY_SLABS
82826+ for (i = 0; i < SLUB_PAGE_SHIFT; i++) {
82827+ struct kmem_cache *s = kmalloc_caches[i];
82828+
82829+ if (s && s->size) {
82830+ char *name = kasprintf(GFP_NOWAIT,
82831+ "usercopy-kmalloc-%d", s->object_size);
82832+
82833+ BUG_ON(!name);
82834+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(name,
82835+ s->object_size, SLAB_USERCOPY);
82836+ }
82837+ }
82838+#endif
82839+
82840 printk(KERN_INFO
82841 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
82842 " CPUs=%d, Nodes=%d\n",
82843@@ -3790,7 +3870,7 @@ static int slab_unmergeable(struct kmem_cache *s)
82844 /*
82845 * We may have set a slab to be unmergeable during bootstrap.
82846 */
82847- if (s->refcount < 0)
82848+ if (atomic_read(&s->refcount) < 0)
82849 return 1;
82850
82851 return 0;
82852@@ -3848,7 +3928,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
82853
82854 s = find_mergeable(memcg, size, align, flags, name, ctor);
82855 if (s) {
82856- s->refcount++;
82857+ atomic_inc(&s->refcount);
82858 /*
82859 * Adjust the object sizes so that we clear
82860 * the complete object on kzalloc.
82861@@ -3857,7 +3937,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
82862 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
82863
82864 if (sysfs_slab_alias(s, name)) {
82865- s->refcount--;
82866+ atomic_dec(&s->refcount);
82867 s = NULL;
82868 }
82869 }
82870@@ -3919,7 +3999,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
82871 return NOTIFY_OK;
82872 }
82873
82874-static struct notifier_block __cpuinitdata slab_notifier = {
82875+static struct notifier_block slab_notifier = {
82876 .notifier_call = slab_cpuup_callback
82877 };
82878
82879@@ -3977,7 +4057,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
82880 }
82881 #endif
82882
82883-#ifdef CONFIG_SYSFS
82884+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
82885 static int count_inuse(struct page *page)
82886 {
82887 return page->inuse;
82888@@ -4364,12 +4444,12 @@ static void resiliency_test(void)
82889 validate_slab_cache(kmalloc_caches[9]);
82890 }
82891 #else
82892-#ifdef CONFIG_SYSFS
82893+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
82894 static void resiliency_test(void) {};
82895 #endif
82896 #endif
82897
82898-#ifdef CONFIG_SYSFS
82899+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
82900 enum slab_stat_type {
82901 SL_ALL, /* All slabs */
82902 SL_PARTIAL, /* Only partially allocated slabs */
82903@@ -4613,7 +4693,7 @@ SLAB_ATTR_RO(ctor);
82904
82905 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
82906 {
82907- return sprintf(buf, "%d\n", s->refcount - 1);
82908+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
82909 }
82910 SLAB_ATTR_RO(aliases);
82911
82912@@ -5266,6 +5346,7 @@ static char *create_unique_id(struct kmem_cache *s)
82913 return name;
82914 }
82915
82916+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
82917 static int sysfs_slab_add(struct kmem_cache *s)
82918 {
82919 int err;
82920@@ -5323,6 +5404,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
82921 kobject_del(&s->kobj);
82922 kobject_put(&s->kobj);
82923 }
82924+#endif
82925
82926 /*
82927 * Need to buffer aliases during bootup until sysfs becomes
82928@@ -5336,6 +5418,7 @@ struct saved_alias {
82929
82930 static struct saved_alias *alias_list;
82931
82932+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
82933 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
82934 {
82935 struct saved_alias *al;
82936@@ -5358,6 +5441,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
82937 alias_list = al;
82938 return 0;
82939 }
82940+#endif
82941
82942 static int __init slab_sysfs_init(void)
82943 {
82944diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
82945index 1b7e22a..3fcd4f3 100644
82946--- a/mm/sparse-vmemmap.c
82947+++ b/mm/sparse-vmemmap.c
82948@@ -128,7 +128,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
82949 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
82950 if (!p)
82951 return NULL;
82952- pud_populate(&init_mm, pud, p);
82953+ pud_populate_kernel(&init_mm, pud, p);
82954 }
82955 return pud;
82956 }
82957@@ -140,7 +140,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
82958 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
82959 if (!p)
82960 return NULL;
82961- pgd_populate(&init_mm, pgd, p);
82962+ pgd_populate_kernel(&init_mm, pgd, p);
82963 }
82964 return pgd;
82965 }
82966diff --git a/mm/sparse.c b/mm/sparse.c
82967index 6b5fb76..db0c190 100644
82968--- a/mm/sparse.c
82969+++ b/mm/sparse.c
82970@@ -782,7 +782,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
82971
82972 for (i = 0; i < PAGES_PER_SECTION; i++) {
82973 if (PageHWPoison(&memmap[i])) {
82974- atomic_long_sub(1, &mce_bad_pages);
82975+ atomic_long_sub_unchecked(1, &mce_bad_pages);
82976 ClearPageHWPoison(&memmap[i]);
82977 }
82978 }
82979diff --git a/mm/swap.c b/mm/swap.c
82980index 6310dc2..3662b3f 100644
82981--- a/mm/swap.c
82982+++ b/mm/swap.c
82983@@ -30,6 +30,7 @@
82984 #include <linux/backing-dev.h>
82985 #include <linux/memcontrol.h>
82986 #include <linux/gfp.h>
82987+#include <linux/hugetlb.h>
82988
82989 #include "internal.h"
82990
82991@@ -72,6 +73,8 @@ static void __put_compound_page(struct page *page)
82992
82993 __page_cache_release(page);
82994 dtor = get_compound_page_dtor(page);
82995+ if (!PageHuge(page))
82996+ BUG_ON(dtor != free_compound_page);
82997 (*dtor)(page);
82998 }
82999
83000diff --git a/mm/swapfile.c b/mm/swapfile.c
83001index e97a0e5..b50e796 100644
83002--- a/mm/swapfile.c
83003+++ b/mm/swapfile.c
83004@@ -64,7 +64,7 @@ static DEFINE_MUTEX(swapon_mutex);
83005
83006 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
83007 /* Activity counter to indicate that a swapon or swapoff has occurred */
83008-static atomic_t proc_poll_event = ATOMIC_INIT(0);
83009+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
83010
83011 static inline unsigned char swap_count(unsigned char ent)
83012 {
83013@@ -1608,7 +1608,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
83014 }
83015 filp_close(swap_file, NULL);
83016 err = 0;
83017- atomic_inc(&proc_poll_event);
83018+ atomic_inc_unchecked(&proc_poll_event);
83019 wake_up_interruptible(&proc_poll_wait);
83020
83021 out_dput:
83022@@ -1625,8 +1625,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
83023
83024 poll_wait(file, &proc_poll_wait, wait);
83025
83026- if (seq->poll_event != atomic_read(&proc_poll_event)) {
83027- seq->poll_event = atomic_read(&proc_poll_event);
83028+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
83029+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
83030 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
83031 }
83032
83033@@ -1724,7 +1724,7 @@ static int swaps_open(struct inode *inode, struct file *file)
83034 return ret;
83035
83036 seq = file->private_data;
83037- seq->poll_event = atomic_read(&proc_poll_event);
83038+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
83039 return 0;
83040 }
83041
83042@@ -2066,7 +2066,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
83043 (frontswap_map) ? "FS" : "");
83044
83045 mutex_unlock(&swapon_mutex);
83046- atomic_inc(&proc_poll_event);
83047+ atomic_inc_unchecked(&proc_poll_event);
83048 wake_up_interruptible(&proc_poll_wait);
83049
83050 if (S_ISREG(inode->i_mode))
83051diff --git a/mm/util.c b/mm/util.c
83052index c55e26b..3f913a9 100644
83053--- a/mm/util.c
83054+++ b/mm/util.c
83055@@ -292,6 +292,12 @@ done:
83056 void arch_pick_mmap_layout(struct mm_struct *mm)
83057 {
83058 mm->mmap_base = TASK_UNMAPPED_BASE;
83059+
83060+#ifdef CONFIG_PAX_RANDMMAP
83061+ if (mm->pax_flags & MF_PAX_RANDMMAP)
83062+ mm->mmap_base += mm->delta_mmap;
83063+#endif
83064+
83065 mm->get_unmapped_area = arch_get_unmapped_area;
83066 mm->unmap_area = arch_unmap_area;
83067 }
83068diff --git a/mm/vmalloc.c b/mm/vmalloc.c
83069index 5123a16..f234a48 100644
83070--- a/mm/vmalloc.c
83071+++ b/mm/vmalloc.c
83072@@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
83073
83074 pte = pte_offset_kernel(pmd, addr);
83075 do {
83076- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
83077- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
83078+
83079+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
83080+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
83081+ BUG_ON(!pte_exec(*pte));
83082+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
83083+ continue;
83084+ }
83085+#endif
83086+
83087+ {
83088+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
83089+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
83090+ }
83091 } while (pte++, addr += PAGE_SIZE, addr != end);
83092 }
83093
83094@@ -100,16 +111,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
83095 pte = pte_alloc_kernel(pmd, addr);
83096 if (!pte)
83097 return -ENOMEM;
83098+
83099+ pax_open_kernel();
83100 do {
83101 struct page *page = pages[*nr];
83102
83103- if (WARN_ON(!pte_none(*pte)))
83104+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
83105+ if (pgprot_val(prot) & _PAGE_NX)
83106+#endif
83107+
83108+ if (!pte_none(*pte)) {
83109+ pax_close_kernel();
83110+ WARN_ON(1);
83111 return -EBUSY;
83112- if (WARN_ON(!page))
83113+ }
83114+ if (!page) {
83115+ pax_close_kernel();
83116+ WARN_ON(1);
83117 return -ENOMEM;
83118+ }
83119 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
83120 (*nr)++;
83121 } while (pte++, addr += PAGE_SIZE, addr != end);
83122+ pax_close_kernel();
83123 return 0;
83124 }
83125
83126@@ -119,7 +143,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
83127 pmd_t *pmd;
83128 unsigned long next;
83129
83130- pmd = pmd_alloc(&init_mm, pud, addr);
83131+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
83132 if (!pmd)
83133 return -ENOMEM;
83134 do {
83135@@ -136,7 +160,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
83136 pud_t *pud;
83137 unsigned long next;
83138
83139- pud = pud_alloc(&init_mm, pgd, addr);
83140+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
83141 if (!pud)
83142 return -ENOMEM;
83143 do {
83144@@ -191,11 +215,20 @@ int is_vmalloc_or_module_addr(const void *x)
83145 * and fall back on vmalloc() if that fails. Others
83146 * just put it in the vmalloc space.
83147 */
83148-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
83149+#ifdef CONFIG_MODULES
83150+#ifdef MODULES_VADDR
83151 unsigned long addr = (unsigned long)x;
83152 if (addr >= MODULES_VADDR && addr < MODULES_END)
83153 return 1;
83154 #endif
83155+
83156+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
83157+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
83158+ return 1;
83159+#endif
83160+
83161+#endif
83162+
83163 return is_vmalloc_addr(x);
83164 }
83165
83166@@ -216,8 +249,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
83167
83168 if (!pgd_none(*pgd)) {
83169 pud_t *pud = pud_offset(pgd, addr);
83170+#ifdef CONFIG_X86
83171+ if (!pud_large(*pud))
83172+#endif
83173 if (!pud_none(*pud)) {
83174 pmd_t *pmd = pmd_offset(pud, addr);
83175+#ifdef CONFIG_X86
83176+ if (!pmd_large(*pmd))
83177+#endif
83178 if (!pmd_none(*pmd)) {
83179 pte_t *ptep, pte;
83180
83181@@ -329,7 +368,7 @@ static void purge_vmap_area_lazy(void);
83182 * Allocate a region of KVA of the specified size and alignment, within the
83183 * vstart and vend.
83184 */
83185-static struct vmap_area *alloc_vmap_area(unsigned long size,
83186+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
83187 unsigned long align,
83188 unsigned long vstart, unsigned long vend,
83189 int node, gfp_t gfp_mask)
83190@@ -1328,6 +1367,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
83191 struct vm_struct *area;
83192
83193 BUG_ON(in_interrupt());
83194+
83195+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
83196+ if (flags & VM_KERNEXEC) {
83197+ if (start != VMALLOC_START || end != VMALLOC_END)
83198+ return NULL;
83199+ start = (unsigned long)MODULES_EXEC_VADDR;
83200+ end = (unsigned long)MODULES_EXEC_END;
83201+ }
83202+#endif
83203+
83204 if (flags & VM_IOREMAP) {
83205 int bit = fls(size);
83206
83207@@ -1568,6 +1617,11 @@ void *vmap(struct page **pages, unsigned int count,
83208 if (count > totalram_pages)
83209 return NULL;
83210
83211+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
83212+ if (!(pgprot_val(prot) & _PAGE_NX))
83213+ flags |= VM_KERNEXEC;
83214+#endif
83215+
83216 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
83217 __builtin_return_address(0));
83218 if (!area)
83219@@ -1669,6 +1723,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
83220 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
83221 goto fail;
83222
83223+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
83224+ if (!(pgprot_val(prot) & _PAGE_NX))
83225+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
83226+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
83227+ else
83228+#endif
83229+
83230 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
83231 start, end, node, gfp_mask, caller);
83232 if (!area)
83233@@ -1842,10 +1903,9 @@ EXPORT_SYMBOL(vzalloc_node);
83234 * For tight control over page level allocator and protection flags
83235 * use __vmalloc() instead.
83236 */
83237-
83238 void *vmalloc_exec(unsigned long size)
83239 {
83240- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
83241+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
83242 -1, __builtin_return_address(0));
83243 }
83244
83245@@ -2136,6 +2196,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
83246 unsigned long uaddr = vma->vm_start;
83247 unsigned long usize = vma->vm_end - vma->vm_start;
83248
83249+ BUG_ON(vma->vm_mirror);
83250+
83251 if ((PAGE_SIZE-1) & (unsigned long)addr)
83252 return -EINVAL;
83253
83254@@ -2575,7 +2637,11 @@ static int s_show(struct seq_file *m, void *p)
83255 v->addr, v->addr + v->size, v->size);
83256
83257 if (v->caller)
83258+#ifdef CONFIG_GRKERNSEC_HIDESYM
83259+ seq_printf(m, " %pK", v->caller);
83260+#else
83261 seq_printf(m, " %pS", v->caller);
83262+#endif
83263
83264 if (v->nr_pages)
83265 seq_printf(m, " pages=%d", v->nr_pages);
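
Note on the mm/vmalloc.c hunks: requests whose protection lacks NX (i.e. executable mappings, as vmalloc_exec() makes) get tagged VM_KERNEXEC and carved out of the module address range, keeping the ordinary vmalloc arena non-executable under KERNEXEC. A compressed sketch of that routing decision; the flag value and the region boundaries below are assumptions for illustration:

    #include <stdio.h>

    #define PAGE_NX      (1ULL << 63)   /* x86 NX bit position */
    #define VM_KERNEXEC  0x100u         /* flag value is an assumption */

    struct range { unsigned long long start, end; };

    /* illustrative x86-64-style layouts, not authoritative constants */
    static const struct range vmalloc_r = { 0xffffc90000000000ULL,
                                            0xffffe8ffffffffffULL };
    static const struct range modules_r = { 0xffffffffa0000000ULL,
                                            0xffffffffff000000ULL };

    static struct range pick_region(unsigned long long prot, unsigned *flags)
    {
        if (!(prot & PAGE_NX)) {   /* executable request: no NX bit set */
            *flags |= VM_KERNEXEC; /* tag it, as the vmap() hunk does */
            return modules_r;      /* carve it out of the module space */
        }
        return vmalloc_r;          /* data stays in the ordinary arena */
    }

    int main(void)
    {
        unsigned flags = 0;
        struct range r = pick_region(0, &flags); /* no NX => executable */
        printf("exec alloc -> [%#llx, %#llx), flags=%#x\n",
               r.start, r.end, flags);
        return 0;
    }
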
83266diff --git a/mm/vmstat.c b/mm/vmstat.c
83267index 9800306..76b4b27 100644
83268--- a/mm/vmstat.c
83269+++ b/mm/vmstat.c
83270@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
83271 *
83272 * vm_stat contains the global counters
83273 */
83274-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
83275+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
83276 EXPORT_SYMBOL(vm_stat);
83277
83278 #ifdef CONFIG_SMP
83279@@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
83280 v = p->vm_stat_diff[i];
83281 p->vm_stat_diff[i] = 0;
83282 local_irq_restore(flags);
83283- atomic_long_add(v, &zone->vm_stat[i]);
83284+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
83285 global_diff[i] += v;
83286 #ifdef CONFIG_NUMA
83287 /* 3 seconds idle till flush */
83288@@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
83289
83290 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
83291 if (global_diff[i])
83292- atomic_long_add(global_diff[i], &vm_stat[i]);
83293+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
83294 }
83295
83296 void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
83297@@ -503,8 +503,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
83298 if (pset->vm_stat_diff[i]) {
83299 int v = pset->vm_stat_diff[i];
83300 pset->vm_stat_diff[i] = 0;
83301- atomic_long_add(v, &zone->vm_stat[i]);
83302- atomic_long_add(v, &vm_stat[i]);
83303+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
83304+ atomic_long_add_unchecked(v, &vm_stat[i]);
83305 }
83306 }
83307 #endif
83308@@ -1223,7 +1223,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
83309 return NOTIFY_OK;
83310 }
83311
83312-static struct notifier_block __cpuinitdata vmstat_notifier =
83313+static struct notifier_block vmstat_notifier =
83314 { &vmstat_cpuup_callback, NULL, 0 };
83315 #endif
83316
83317@@ -1238,10 +1238,20 @@ static int __init setup_vmstat(void)
83318 start_cpu_timer(cpu);
83319 #endif
83320 #ifdef CONFIG_PROC_FS
83321- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
83322- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
83323- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
83324- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
83325+ {
83326+ mode_t gr_mode = S_IRUGO;
83327+#ifdef CONFIG_GRKERNSEC_PROC_ADD
83328+ gr_mode = S_IRUSR;
83329+#endif
83330+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
83331+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
83332+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
83333+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
83334+#else
83335+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
83336+#endif
83337+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
83338+ }
83339 #endif
83340 return 0;
83341 }
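
Note on the setup_vmstat() hunk: one mode is computed for all four proc entries, tightened to owner-only under GRKERNSEC_PROC_ADD, with vmstat regaining group read under GRKERNSEC_PROC_USERGROUP. The same selection logic, extracted into a standalone sketch with both options defined:

    #include <stdio.h>
    #include <sys/stat.h>

    #define CONFIG_GRKERNSEC_PROC_ADD
    #define CONFIG_GRKERNSEC_PROC_USERGROUP

    int main(void)
    {
        mode_t gr_mode = S_IRUSR | S_IRGRP | S_IROTH;   /* S_IRUGO, 0444 */
    #ifdef CONFIG_GRKERNSEC_PROC_ADD
        gr_mode = S_IRUSR;                              /* owner-only, 0400 */
    #endif
        printf("buddyinfo/pagetypeinfo/zoneinfo: %04o\n", (unsigned)gr_mode);
    #ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
        printf("vmstat: %04o\n", (unsigned)(gr_mode | S_IRGRP));
    #else
        printf("vmstat: %04o\n", (unsigned)gr_mode);
    #endif
        return 0;
    }
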
83342diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
83343index a292e80..785ee68 100644
83344--- a/net/8021q/vlan.c
83345+++ b/net/8021q/vlan.c
83346@@ -485,7 +485,7 @@ out:
83347 return NOTIFY_DONE;
83348 }
83349
83350-static struct notifier_block vlan_notifier_block __read_mostly = {
83351+static struct notifier_block vlan_notifier_block = {
83352 .notifier_call = vlan_device_event,
83353 };
83354
83355@@ -560,8 +560,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
83356 err = -EPERM;
83357 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
83358 break;
83359- if ((args.u.name_type >= 0) &&
83360- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
83361+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
83362 struct vlan_net *vn;
83363
83364 vn = net_generic(net, vlan_net_id);
83365diff --git a/net/9p/mod.c b/net/9p/mod.c
83366index 6ab36ae..6f1841b 100644
83367--- a/net/9p/mod.c
83368+++ b/net/9p/mod.c
83369@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
83370 void v9fs_register_trans(struct p9_trans_module *m)
83371 {
83372 spin_lock(&v9fs_trans_lock);
83373- list_add_tail(&m->list, &v9fs_trans_list);
83374+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
83375 spin_unlock(&v9fs_trans_lock);
83376 }
83377 EXPORT_SYMBOL(v9fs_register_trans);
83378@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
83379 void v9fs_unregister_trans(struct p9_trans_module *m)
83380 {
83381 spin_lock(&v9fs_trans_lock);
83382- list_del_init(&m->list);
83383+ pax_list_del_init((struct list_head *)&m->list);
83384 spin_unlock(&v9fs_trans_lock);
83385 }
83386 EXPORT_SYMBOL(v9fs_unregister_trans);
83387diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
83388index 02efb25..41541a9 100644
83389--- a/net/9p/trans_fd.c
83390+++ b/net/9p/trans_fd.c
83391@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
83392 oldfs = get_fs();
83393 set_fs(get_ds());
83394 /* The cast to a user pointer is valid due to the set_fs() */
83395- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
83396+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
83397 set_fs(oldfs);
83398
83399 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
83400diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
83401index 876fbe8..8bbea9f 100644
83402--- a/net/atm/atm_misc.c
83403+++ b/net/atm/atm_misc.c
83404@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
83405 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
83406 return 1;
83407 atm_return(vcc, truesize);
83408- atomic_inc(&vcc->stats->rx_drop);
83409+ atomic_inc_unchecked(&vcc->stats->rx_drop);
83410 return 0;
83411 }
83412 EXPORT_SYMBOL(atm_charge);
83413@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
83414 }
83415 }
83416 atm_return(vcc, guess);
83417- atomic_inc(&vcc->stats->rx_drop);
83418+ atomic_inc_unchecked(&vcc->stats->rx_drop);
83419 return NULL;
83420 }
83421 EXPORT_SYMBOL(atm_alloc_charge);
83422@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
83423
83424 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
83425 {
83426-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
83427+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
83428 __SONET_ITEMS
83429 #undef __HANDLE_ITEM
83430 }
83431@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
83432
83433 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
83434 {
83435-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
83436+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
83437 __SONET_ITEMS
83438 #undef __HANDLE_ITEM
83439 }
83440diff --git a/net/atm/lec.h b/net/atm/lec.h
83441index a86aff9..3a0d6f6 100644
83442--- a/net/atm/lec.h
83443+++ b/net/atm/lec.h
83444@@ -48,7 +48,7 @@ struct lane2_ops {
83445 const u8 *tlvs, u32 sizeoftlvs);
83446 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
83447 const u8 *tlvs, u32 sizeoftlvs);
83448-};
83449+} __no_const;
83450
83451 /*
83452 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
83453diff --git a/net/atm/proc.c b/net/atm/proc.c
83454index 0d020de..011c7bb 100644
83455--- a/net/atm/proc.c
83456+++ b/net/atm/proc.c
83457@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
83458 const struct k_atm_aal_stats *stats)
83459 {
83460 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
83461- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
83462- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
83463- atomic_read(&stats->rx_drop));
83464+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
83465+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
83466+ atomic_read_unchecked(&stats->rx_drop));
83467 }
83468
83469 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
83470diff --git a/net/atm/resources.c b/net/atm/resources.c
83471index 0447d5d..3cf4728 100644
83472--- a/net/atm/resources.c
83473+++ b/net/atm/resources.c
83474@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
83475 static void copy_aal_stats(struct k_atm_aal_stats *from,
83476 struct atm_aal_stats *to)
83477 {
83478-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
83479+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
83480 __AAL_STAT_ITEMS
83481 #undef __HANDLE_ITEM
83482 }
83483@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
83484 static void subtract_aal_stats(struct k_atm_aal_stats *from,
83485 struct atm_aal_stats *to)
83486 {
83487-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
83488+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
83489 __AAL_STAT_ITEMS
83490 #undef __HANDLE_ITEM
83491 }
83492diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
83493index d5744b7..506bae3 100644
83494--- a/net/ax25/sysctl_net_ax25.c
83495+++ b/net/ax25/sysctl_net_ax25.c
83496@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
83497 {
83498 char path[sizeof("net/ax25/") + IFNAMSIZ];
83499 int k;
83500- struct ctl_table *table;
83501+ ctl_table_no_const *table;
83502
83503 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
83504 if (!table)
83505diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
83506index 7d02ebd..4d4cc01 100644
83507--- a/net/batman-adv/bat_iv_ogm.c
83508+++ b/net/batman-adv/bat_iv_ogm.c
83509@@ -63,7 +63,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
83510
83511 /* randomize initial seqno to avoid collision */
83512 get_random_bytes(&random_seqno, sizeof(random_seqno));
83513- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
83514+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
83515
83516 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
83517 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
83518@@ -615,9 +615,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
83519 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
83520
83521 /* change sequence number to network order */
83522- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
83523+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
83524 batadv_ogm_packet->seqno = htonl(seqno);
83525- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
83526+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
83527
83528 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
83529 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
83530@@ -1022,7 +1022,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
83531 return;
83532
83533 /* could be changed by schedule_own_packet() */
83534- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
83535+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
83536
83537 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
83538 has_directlink_flag = 1;
83539diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
83540index f1d37cd..4190879 100644
83541--- a/net/batman-adv/hard-interface.c
83542+++ b/net/batman-adv/hard-interface.c
83543@@ -370,7 +370,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
83544 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
83545 dev_add_pack(&hard_iface->batman_adv_ptype);
83546
83547- atomic_set(&hard_iface->frag_seqno, 1);
83548+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
83549 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
83550 hard_iface->net_dev->name);
83551
83552@@ -493,7 +493,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
83553 /* This can't be called via a bat_priv callback because
83554 * we have no bat_priv yet.
83555 */
83556- atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
83557+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
83558 hard_iface->bat_iv.ogm_buff = NULL;
83559
83560 return hard_iface;
83561diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
83562index 6b548fd..fc32c8d 100644
83563--- a/net/batman-adv/soft-interface.c
83564+++ b/net/batman-adv/soft-interface.c
83565@@ -252,7 +252,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
83566 primary_if->net_dev->dev_addr, ETH_ALEN);
83567
83568 /* set broadcast sequence number */
83569- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
83570+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
83571 bcast_packet->seqno = htonl(seqno);
83572
83573 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
83574@@ -497,7 +497,7 @@ struct net_device *batadv_softif_create(const char *name)
83575 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
83576
83577 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
83578- atomic_set(&bat_priv->bcast_seqno, 1);
83579+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
83580 atomic_set(&bat_priv->tt.vn, 0);
83581 atomic_set(&bat_priv->tt.local_changes, 0);
83582 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
83583diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
83584index ae9ac9a..11e0fe7 100644
83585--- a/net/batman-adv/types.h
83586+++ b/net/batman-adv/types.h
83587@@ -48,7 +48,7 @@
83588 struct batadv_hard_iface_bat_iv {
83589 unsigned char *ogm_buff;
83590 int ogm_buff_len;
83591- atomic_t ogm_seqno;
83592+ atomic_unchecked_t ogm_seqno;
83593 };
83594
83595 struct batadv_hard_iface {
83596@@ -56,7 +56,7 @@ struct batadv_hard_iface {
83597 int16_t if_num;
83598 char if_status;
83599 struct net_device *net_dev;
83600- atomic_t frag_seqno;
83601+ atomic_unchecked_t frag_seqno;
83602 struct kobject *hardif_obj;
83603 atomic_t refcount;
83604 struct packet_type batman_adv_ptype;
83605@@ -284,7 +284,7 @@ struct batadv_priv {
83606 atomic_t orig_interval; /* uint */
83607 atomic_t hop_penalty; /* uint */
83608 atomic_t log_level; /* uint */
83609- atomic_t bcast_seqno;
83610+ atomic_unchecked_t bcast_seqno;
83611 atomic_t bcast_queue_left;
83612 atomic_t batman_queue_left;
83613 char num_ifaces;
83614diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
83615index 10aff49..ea8e021 100644
83616--- a/net/batman-adv/unicast.c
83617+++ b/net/batman-adv/unicast.c
83618@@ -272,7 +272,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
83619 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
83620 frag2->flags = large_tail;
83621
83622- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
83623+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
83624 frag1->seqno = htons(seqno - 1);
83625 frag2->seqno = htons(seqno);
83626
83627diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
83628index 07f0739..3c42e34 100644
83629--- a/net/bluetooth/hci_sock.c
83630+++ b/net/bluetooth/hci_sock.c
83631@@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
83632 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
83633 }
83634
83635- len = min_t(unsigned int, len, sizeof(uf));
83636+ len = min((size_t)len, sizeof(uf));
83637 if (copy_from_user(&uf, optval, len)) {
83638 err = -EFAULT;
83639 break;
83640diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
83641index 22e6583..426e2f3 100644
83642--- a/net/bluetooth/l2cap_core.c
83643+++ b/net/bluetooth/l2cap_core.c
83644@@ -3400,8 +3400,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
83645 break;
83646
83647 case L2CAP_CONF_RFC:
83648- if (olen == sizeof(rfc))
83649- memcpy(&rfc, (void *)val, olen);
83650+ if (olen != sizeof(rfc))
83651+ break;
83652+
83653+ memcpy(&rfc, (void *)val, olen);
83654
83655 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
83656 rfc.mode != chan->mode)
83657diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
83658index 1bcfb84..dad9f98 100644
83659--- a/net/bluetooth/l2cap_sock.c
83660+++ b/net/bluetooth/l2cap_sock.c
83661@@ -479,7 +479,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
83662 struct sock *sk = sock->sk;
83663 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
83664 struct l2cap_options opts;
83665- int len, err = 0;
83666+ int err = 0;
83667+ size_t len = optlen;
83668 u32 opt;
83669
83670 BT_DBG("sk %p", sk);
83671@@ -501,7 +502,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
83672 opts.max_tx = chan->max_tx;
83673 opts.txwin_size = chan->tx_win;
83674
83675- len = min_t(unsigned int, sizeof(opts), optlen);
83676+ len = min(sizeof(opts), len);
83677 if (copy_from_user((char *) &opts, optval, len)) {
83678 err = -EFAULT;
83679 break;
83680@@ -581,7 +582,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
83681 struct bt_security sec;
83682 struct bt_power pwr;
83683 struct l2cap_conn *conn;
83684- int len, err = 0;
83685+ int err = 0;
83686+ size_t len = optlen;
83687 u32 opt;
83688
83689 BT_DBG("sk %p", sk);
83690@@ -604,7 +606,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
83691
83692 sec.level = BT_SECURITY_LOW;
83693
83694- len = min_t(unsigned int, sizeof(sec), optlen);
83695+ len = min(sizeof(sec), len);
83696 if (copy_from_user((char *) &sec, optval, len)) {
83697 err = -EFAULT;
83698 break;
83699@@ -701,7 +703,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
83700
83701 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
83702
83703- len = min_t(unsigned int, sizeof(pwr), optlen);
83704+ len = min(sizeof(pwr), len);
83705 if (copy_from_user((char *) &pwr, optval, len)) {
83706 err = -EFAULT;
83707 break;
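
Note on the l2cap_sock.c (and, below, rfcomm) setsockopt hunks: `int len` plus `min_t(unsigned int, ...)` is replaced by a size_t length clamped with a full-width min(), so the user-supplied optlen is never squeezed through a narrower type on its way to copy_from_user(). A userspace demonstration of the truncation the old pattern allows (assumes a 64-bit size_t):

    #include <stddef.h>
    #include <stdio.h>

    /* same shape as the kernel's min_t() */
    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    int main(void)
    {
        size_t optlen = (size_t)((1ULL << 32) + 8); /* absurd user value */
        size_t buf_sz = 16;                         /* sizeof(opts), say */

        /* old pattern: optlen is truncated to 32 bits before comparing */
        unsigned int bad = min_t(unsigned int, buf_sz, optlen);
        /* patched pattern: compare at full size_t width, then clamp */
        size_t good = optlen < buf_sz ? optlen : buf_sz;

        printf("min_t(unsigned int, ...): %u  (4G+8 masquerades as 8)\n", bad);
        printf("full-width min          : %zu (honestly clamped)\n", good);
        return 0;
    }
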
83708diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
83709index ce3f665..2c7d08f 100644
83710--- a/net/bluetooth/rfcomm/sock.c
83711+++ b/net/bluetooth/rfcomm/sock.c
83712@@ -667,7 +667,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
83713 struct sock *sk = sock->sk;
83714 struct bt_security sec;
83715 int err = 0;
83716- size_t len;
83717+ size_t len = optlen;
83718 u32 opt;
83719
83720 BT_DBG("sk %p", sk);
83721@@ -689,7 +689,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
83722
83723 sec.level = BT_SECURITY_LOW;
83724
83725- len = min_t(unsigned int, sizeof(sec), optlen);
83726+ len = min(sizeof(sec), len);
83727 if (copy_from_user((char *) &sec, optval, len)) {
83728 err = -EFAULT;
83729 break;
83730diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
83731index bd6fd0f..6492cba 100644
83732--- a/net/bluetooth/rfcomm/tty.c
83733+++ b/net/bluetooth/rfcomm/tty.c
83734@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
83735 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
83736
83737 spin_lock_irqsave(&dev->port.lock, flags);
83738- if (dev->port.count > 0) {
83739+ if (atomic_read(&dev->port.count) > 0) {
83740 spin_unlock_irqrestore(&dev->port.lock, flags);
83741 return;
83742 }
83743@@ -664,10 +664,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
83744 return -ENODEV;
83745
83746 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
83747- dev->channel, dev->port.count);
83748+ dev->channel, atomic_read(&dev->port.count));
83749
83750 spin_lock_irqsave(&dev->port.lock, flags);
83751- if (++dev->port.count > 1) {
83752+ if (atomic_inc_return(&dev->port.count) > 1) {
83753 spin_unlock_irqrestore(&dev->port.lock, flags);
83754 return 0;
83755 }
83756@@ -732,10 +732,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
83757 return;
83758
83759 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
83760- dev->port.count);
83761+ atomic_read(&dev->port.count));
83762
83763 spin_lock_irqsave(&dev->port.lock, flags);
83764- if (!--dev->port.count) {
83765+ if (!atomic_dec_return(&dev->port.count)) {
83766 spin_unlock_irqrestore(&dev->port.lock, flags);
83767 if (dev->tty_dev->parent)
83768 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
83769diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
83770index acc9f4c..2897e40 100644
83771--- a/net/bridge/br_mdb.c
83772+++ b/net/bridge/br_mdb.c
83773@@ -82,6 +82,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
83774 port = p->port;
83775 if (port) {
83776 struct br_mdb_entry e;
83777+ memset(&e, 0, sizeof(e));
83778 e.ifindex = port->dev->ifindex;
83779 e.state = p->state;
83780 if (p->addr.proto == htons(ETH_P_IP))
83781@@ -138,6 +139,7 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
83782 break;
83783
83784 bpm = nlmsg_data(nlh);
83785+ memset(bpm, 0, sizeof(*bpm));
83786 bpm->ifindex = dev->ifindex;
83787 if (br_mdb_fill_info(skb, cb, dev) < 0)
83788 goto out;
83789@@ -173,6 +175,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
83790 return -EMSGSIZE;
83791
83792 bpm = nlmsg_data(nlh);
83793+ memset(bpm, 0, sizeof(*bpm));
83794 bpm->family = AF_BRIDGE;
83795 bpm->ifindex = dev->ifindex;
83796 nest = nla_nest_start(skb, MDBA_MDB);
83797@@ -230,6 +233,7 @@ void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
83798 {
83799 struct br_mdb_entry entry;
83800
83801+ memset(&entry, 0, sizeof(entry));
83802 entry.ifindex = port->dev->ifindex;
83803 entry.addr.proto = group->proto;
83804 entry.addr.u.ip4 = group->u.ip4;
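
Note on the br_mdb.c hunks: the added memset() calls close a stack infoleak class, since structs copied out via netlink otherwise carry whatever bytes happened to sit in padding or in fields not written on every path. An illustrative userspace reproduction; the padding offsets assume a typical ABI with 4-byte alignment:

    #include <stdio.h>
    #include <string.h>

    struct mdb_entry_like {      /* stand-in for struct br_mdb_entry */
        unsigned ifindex;        /* offset 0 */
        unsigned char state;     /* offset 4; bytes 5..7 are padding */
        unsigned addr;           /* offset 8 */
    };

    int main(void)
    {
        unsigned char dirty[sizeof(struct mdb_entry_like)];
        memset(dirty, 0xAA, sizeof(dirty)); /* pretend: stale stack data */

        struct mdb_entry_like e;
        memcpy(&e, dirty, sizeof(e));       /* simulate a dirty stack slot */
        e.ifindex = 2;
        e.state = 1;
        e.addr = 0x0a000001;                /* member writes skip padding */

        const unsigned char *p = (const unsigned char *)&e;
        printf("padding byte at offset 5: %#x (0xaa would leak without the "
               "memset the patch adds)\n", p[5]);
        return 0;
    }
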
83805diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
83806index 5fe2ff3..121d696 100644
83807--- a/net/bridge/netfilter/ebtables.c
83808+++ b/net/bridge/netfilter/ebtables.c
83809@@ -1523,7 +1523,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
83810 tmp.valid_hooks = t->table->valid_hooks;
83811 }
83812 mutex_unlock(&ebt_mutex);
83813- if (copy_to_user(user, &tmp, *len) != 0){
83814+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
83815 BUGPRINT("c2u Didn't work\n");
83816 ret = -EFAULT;
83817 break;
83818@@ -2327,7 +2327,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
83819 goto out;
83820 tmp.valid_hooks = t->valid_hooks;
83821
83822- if (copy_to_user(user, &tmp, *len) != 0) {
83823+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
83824 ret = -EFAULT;
83825 break;
83826 }
83827@@ -2338,7 +2338,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
83828 tmp.entries_size = t->table->entries_size;
83829 tmp.valid_hooks = t->table->valid_hooks;
83830
83831- if (copy_to_user(user, &tmp, *len) != 0) {
83832+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
83833 ret = -EFAULT;
83834 break;
83835 }
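
Note on the ebtables.c hunks: all three copy-out paths take *len from userspace but copy from a fixed-size kernel struct, so the added `*len > sizeof(tmp)` guards stop a kernel stack overread. A userspace stand-in for the pattern; copy_to_user_sim is a hypothetical stub, not the kernel API:

    #include <stdio.h>
    #include <string.h>

    struct repl_like {           /* fixed-size kernel-side reply */
        char name[32];
        unsigned entries_size;
        unsigned valid_hooks;
    };

    /* hypothetical stub for copy_to_user(); returns 0 on success */
    static int copy_to_user_sim(void *dst, const void *src, size_t n)
    {
        memcpy(dst, src, n);
        return 0;
    }

    int main(void)
    {
        struct repl_like tmp = { "filter", 128, 7 };
        char user_buf[256];
        size_t len = sizeof(user_buf);   /* user asked for 256 bytes */

        if (len > sizeof(tmp)) {         /* the guard the patch adds */
            puts("rejected: len exceeds sizeof(tmp), no overread");
            return 1;
        }
        return copy_to_user_sim(user_buf, &tmp, len);
    }
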
83836diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
83837index a376ec1..1fbd6be 100644
83838--- a/net/caif/cfctrl.c
83839+++ b/net/caif/cfctrl.c
83840@@ -10,6 +10,7 @@
83841 #include <linux/spinlock.h>
83842 #include <linux/slab.h>
83843 #include <linux/pkt_sched.h>
83844+#include <linux/sched.h>
83845 #include <net/caif/caif_layer.h>
83846 #include <net/caif/cfpkt.h>
83847 #include <net/caif/cfctrl.h>
83848@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
83849 memset(&dev_info, 0, sizeof(dev_info));
83850 dev_info.id = 0xff;
83851 cfsrvl_init(&this->serv, 0, &dev_info, false);
83852- atomic_set(&this->req_seq_no, 1);
83853- atomic_set(&this->rsp_seq_no, 1);
83854+ atomic_set_unchecked(&this->req_seq_no, 1);
83855+ atomic_set_unchecked(&this->rsp_seq_no, 1);
83856 this->serv.layer.receive = cfctrl_recv;
83857 sprintf(this->serv.layer.name, "ctrl");
83858 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
83859@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
83860 struct cfctrl_request_info *req)
83861 {
83862 spin_lock_bh(&ctrl->info_list_lock);
83863- atomic_inc(&ctrl->req_seq_no);
83864- req->sequence_no = atomic_read(&ctrl->req_seq_no);
83865+ atomic_inc_unchecked(&ctrl->req_seq_no);
83866+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
83867 list_add_tail(&req->list, &ctrl->list);
83868 spin_unlock_bh(&ctrl->info_list_lock);
83869 }
83870@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
83871 if (p != first)
83872 pr_warn("Requests are not received in order\n");
83873
83874- atomic_set(&ctrl->rsp_seq_no,
83875+ atomic_set_unchecked(&ctrl->rsp_seq_no,
83876 p->sequence_no);
83877 list_del(&p->list);
83878 goto out;
83879diff --git a/net/can/af_can.c b/net/can/af_can.c
83880index ddac1ee..3ee0a78 100644
83881--- a/net/can/af_can.c
83882+++ b/net/can/af_can.c
83883@@ -872,7 +872,7 @@ static const struct net_proto_family can_family_ops = {
83884 };
83885
83886 /* notifier block for netdevice event */
83887-static struct notifier_block can_netdev_notifier __read_mostly = {
83888+static struct notifier_block can_netdev_notifier = {
83889 .notifier_call = can_notifier,
83890 };
83891
83892diff --git a/net/can/gw.c b/net/can/gw.c
83893index 574dda78e..3d2b3da 100644
83894--- a/net/can/gw.c
83895+++ b/net/can/gw.c
83896@@ -67,7 +67,6 @@ MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
83897 MODULE_ALIAS("can-gw");
83898
83899 static HLIST_HEAD(cgw_list);
83900-static struct notifier_block notifier;
83901
83902 static struct kmem_cache *cgw_cache __read_mostly;
83903
83904@@ -893,6 +892,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
83905 return err;
83906 }
83907
83908+static struct notifier_block notifier = {
83909+ .notifier_call = cgw_notifier
83910+};
83911+
83912 static __init int cgw_module_init(void)
83913 {
83914 printk(banner);
83915@@ -904,7 +907,6 @@ static __init int cgw_module_init(void)
83916 return -ENOMEM;
83917
83918 /* set notifier */
83919- notifier.notifier_call = cgw_notifier;
83920 register_netdevice_notifier(&notifier);
83921
83922 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
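
Note on the gw.c change: the notifier moves from a runtime field assignment in cgw_module_init() to static designated initialization. Once nothing stores into the struct after boot, it becomes a candidate for read-only placement, which is the broader constification theme of this patch. A trivial sketch of the resulting shape; notifier_block_like is a stand-in type:

    #include <stdio.h>

    struct notifier_block_like {        /* stand-in type */
        int (*notifier_call)(unsigned long event);
    };

    static int cgw_notifier_like(unsigned long event)
    {
        return (int)event;
    }

    /* after the patch: fully initialized at compile time, no runtime
     * store needed, so the object could live in read-only data */
    static const struct notifier_block_like notifier = {
        .notifier_call = cgw_notifier_like,
    };

    int main(void)
    {
        printf("notifier(7) -> %d\n", notifier.notifier_call(7));
        return 0;
    }
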
83923diff --git a/net/compat.c b/net/compat.c
83924index 79ae884..17c5c09 100644
83925--- a/net/compat.c
83926+++ b/net/compat.c
83927@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
83928 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
83929 __get_user(kmsg->msg_flags, &umsg->msg_flags))
83930 return -EFAULT;
83931- kmsg->msg_name = compat_ptr(tmp1);
83932- kmsg->msg_iov = compat_ptr(tmp2);
83933- kmsg->msg_control = compat_ptr(tmp3);
83934+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
83935+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
83936+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
83937 return 0;
83938 }
83939
83940@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
83941
83942 if (kern_msg->msg_namelen) {
83943 if (mode == VERIFY_READ) {
83944- int err = move_addr_to_kernel(kern_msg->msg_name,
83945+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
83946 kern_msg->msg_namelen,
83947 kern_address);
83948 if (err < 0)
83949@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
83950 kern_msg->msg_name = NULL;
83951
83952 tot_len = iov_from_user_compat_to_kern(kern_iov,
83953- (struct compat_iovec __user *)kern_msg->msg_iov,
83954+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
83955 kern_msg->msg_iovlen);
83956 if (tot_len >= 0)
83957 kern_msg->msg_iov = kern_iov;
83958@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
83959
83960 #define CMSG_COMPAT_FIRSTHDR(msg) \
83961 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
83962- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
83963+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
83964 (struct compat_cmsghdr __user *)NULL)
83965
83966 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
83967 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
83968 (ucmlen) <= (unsigned long) \
83969 ((mhdr)->msg_controllen - \
83970- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
83971+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
83972
83973 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
83974 struct compat_cmsghdr __user *cmsg, int cmsg_len)
83975 {
83976 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
83977- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
83978+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
83979 msg->msg_controllen)
83980 return NULL;
83981 return (struct compat_cmsghdr __user *)ptr;
83982@@ -219,7 +219,7 @@ Efault:
83983
83984 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
83985 {
83986- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
83987+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
83988 struct compat_cmsghdr cmhdr;
83989 struct compat_timeval ctv;
83990 struct compat_timespec cts[3];
83991@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
83992
83993 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
83994 {
83995- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
83996+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
83997 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
83998 int fdnum = scm->fp->count;
83999 struct file **fp = scm->fp->fp;
84000@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
84001 return -EFAULT;
84002 old_fs = get_fs();
84003 set_fs(KERNEL_DS);
84004- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
84005+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
84006 set_fs(old_fs);
84007
84008 return err;
84009@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
84010 len = sizeof(ktime);
84011 old_fs = get_fs();
84012 set_fs(KERNEL_DS);
84013- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
84014+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
84015 set_fs(old_fs);
84016
84017 if (!err) {
84018@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
84019 case MCAST_JOIN_GROUP:
84020 case MCAST_LEAVE_GROUP:
84021 {
84022- struct compat_group_req __user *gr32 = (void *)optval;
84023+ struct compat_group_req __user *gr32 = (void __user *)optval;
84024 struct group_req __user *kgr =
84025 compat_alloc_user_space(sizeof(struct group_req));
84026 u32 interface;
84027@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
84028 case MCAST_BLOCK_SOURCE:
84029 case MCAST_UNBLOCK_SOURCE:
84030 {
84031- struct compat_group_source_req __user *gsr32 = (void *)optval;
84032+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
84033 struct group_source_req __user *kgsr = compat_alloc_user_space(
84034 sizeof(struct group_source_req));
84035 u32 interface;
84036@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
84037 }
84038 case MCAST_MSFILTER:
84039 {
84040- struct compat_group_filter __user *gf32 = (void *)optval;
84041+ struct compat_group_filter __user *gf32 = (void __user *)optval;
84042 struct group_filter __user *kgf;
84043 u32 interface, fmode, numsrc;
84044
84045@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
84046 char __user *optval, int __user *optlen,
84047 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
84048 {
84049- struct compat_group_filter __user *gf32 = (void *)optval;
84050+ struct compat_group_filter __user *gf32 = (void __user *)optval;
84051 struct group_filter __user *kgf;
84052 int __user *koptlen;
84053 u32 interface, fmode, numsrc;
84054@@ -796,7 +796,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
84055
84056 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
84057 return -EINVAL;
84058- if (copy_from_user(a, args, nas[call]))
84059+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
84060 return -EFAULT;
84061 a0 = a[0];
84062 a1 = a[1];
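
Note on the compat_sys_socketcall() hunk: `nas[call] > sizeof a` is checked ahead of the copy, so even a wrong entry in the per-call argument-size table can no longer overflow the on-stack argument array. A sketch of the guard with a deliberately bogus table entry:

    #include <stdio.h>

    int main(void)
    {
        unsigned long a[6];                    /* on-stack argument array */
        static const unsigned char nas[3] = {
            2 * sizeof(long),                  /* SYS_SOCKET-ish entry */
            3 * sizeof(long),
            7 * sizeof(long)                   /* deliberately bogus */
        };
        int call = 2;

        if (nas[call] > sizeof(a)) {           /* the added guard */
            puts("EFAULT-style reject: table entry exceeds sizeof(a)");
            return 1;
        }
        /* copy_from_user(a, args, nas[call]) would now be safe */
        (void)a;
        return 0;
    }
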
84063diff --git a/net/core/datagram.c b/net/core/datagram.c
84064index 368f9c3..f82d4a3 100644
84065--- a/net/core/datagram.c
84066+++ b/net/core/datagram.c
84067@@ -289,7 +289,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
84068 }
84069
84070 kfree_skb(skb);
84071- atomic_inc(&sk->sk_drops);
84072+ atomic_inc_unchecked(&sk->sk_drops);
84073 sk_mem_reclaim_partial(sk);
84074
84075 return err;
84076diff --git a/net/core/dev.c b/net/core/dev.c
84077index f64e439..8f959e6 100644
84078--- a/net/core/dev.c
84079+++ b/net/core/dev.c
84080@@ -1250,9 +1250,13 @@ void dev_load(struct net *net, const char *name)
84081 if (no_module && capable(CAP_NET_ADMIN))
84082 no_module = request_module("netdev-%s", name);
84083 if (no_module && capable(CAP_SYS_MODULE)) {
84084+#ifdef CONFIG_GRKERNSEC_MODHARDEN
84085+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
84086+#else
84087 if (!request_module("%s", name))
84088 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
84089 name);
84090+#endif
84091 }
84092 }
84093 EXPORT_SYMBOL(dev_load);
84094@@ -1715,7 +1719,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
84095 {
84096 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
84097 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
84098- atomic_long_inc(&dev->rx_dropped);
84099+ atomic_long_inc_unchecked(&dev->rx_dropped);
84100 kfree_skb(skb);
84101 return NET_RX_DROP;
84102 }
84103@@ -1725,7 +1729,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
84104 nf_reset(skb);
84105
84106 if (unlikely(!is_skb_forwardable(dev, skb))) {
84107- atomic_long_inc(&dev->rx_dropped);
84108+ atomic_long_inc_unchecked(&dev->rx_dropped);
84109 kfree_skb(skb);
84110 return NET_RX_DROP;
84111 }
84112@@ -2180,7 +2184,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
84113
84114 struct dev_gso_cb {
84115 void (*destructor)(struct sk_buff *skb);
84116-};
84117+} __no_const;
84118
84119 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
84120
84121@@ -3053,7 +3057,7 @@ enqueue:
84122
84123 local_irq_restore(flags);
84124
84125- atomic_long_inc(&skb->dev->rx_dropped);
84126+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
84127 kfree_skb(skb);
84128 return NET_RX_DROP;
84129 }
84130@@ -3125,7 +3129,7 @@ int netif_rx_ni(struct sk_buff *skb)
84131 }
84132 EXPORT_SYMBOL(netif_rx_ni);
84133
84134-static void net_tx_action(struct softirq_action *h)
84135+static void net_tx_action(void)
84136 {
84137 struct softnet_data *sd = &__get_cpu_var(softnet_data);
84138
84139@@ -3456,7 +3460,7 @@ ncls:
84140 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
84141 } else {
84142 drop:
84143- atomic_long_inc(&skb->dev->rx_dropped);
84144+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
84145 kfree_skb(skb);
84146 /* Jamal, now you will not able to escape explaining
84147 * me how you were going to use this. :-)
84148@@ -4039,7 +4043,7 @@ void netif_napi_del(struct napi_struct *napi)
84149 }
84150 EXPORT_SYMBOL(netif_napi_del);
84151
84152-static void net_rx_action(struct softirq_action *h)
84153+static void net_rx_action(void)
84154 {
84155 struct softnet_data *sd = &__get_cpu_var(softnet_data);
84156 unsigned long time_limit = jiffies + 2;
84157@@ -4523,8 +4527,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
84158 else
84159 seq_printf(seq, "%04x", ntohs(pt->type));
84160
84161+#ifdef CONFIG_GRKERNSEC_HIDESYM
84162+ seq_printf(seq, " %-8s %p\n",
84163+ pt->dev ? pt->dev->name : "", NULL);
84164+#else
84165 seq_printf(seq, " %-8s %pF\n",
84166 pt->dev ? pt->dev->name : "", pt->func);
84167+#endif
84168 }
84169
84170 return 0;
84171@@ -6096,7 +6105,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
84172 } else {
84173 netdev_stats_to_stats64(storage, &dev->stats);
84174 }
84175- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
84176+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
84177 return storage;
84178 }
84179 EXPORT_SYMBOL(dev_get_stats);
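
Note on the dev.c hunks: two recurring patterns meet here. The rx_dropped statistics counters move to the *_unchecked atomics (same reasoning as mm/swapfile.c earlier), and ptype_seq_show() redacts the handler's function pointer under GRKERNSEC_HIDESYM so /proc output cannot seed attacks on kernel text addresses. A userspace sketch of the redaction idea; the euid test is only a stand-in for the in-kernel privilege gate:

    #include <stdio.h>
    #include <unistd.h>

    static void show_handler(const void *func)
    {
        if (geteuid() != 0)     /* stand-in for the in-kernel gate */
            printf("func=%p\n", (void *)0);  /* redacted, like the hunk */
        else
            printf("func=%p\n", func);
    }

    int main(void)
    {
        show_handler((const void *)(unsigned long)&show_handler);
        return 0;
    }
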
84180diff --git a/net/core/flow.c b/net/core/flow.c
84181index b0901ee..7d3c2ca 100644
84182--- a/net/core/flow.c
84183+++ b/net/core/flow.c
84184@@ -61,7 +61,7 @@ struct flow_cache {
84185 struct timer_list rnd_timer;
84186 };
84187
84188-atomic_t flow_cache_genid = ATOMIC_INIT(0);
84189+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
84190 EXPORT_SYMBOL(flow_cache_genid);
84191 static struct flow_cache flow_cache_global;
84192 static struct kmem_cache *flow_cachep __read_mostly;
84193@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
84194
84195 static int flow_entry_valid(struct flow_cache_entry *fle)
84196 {
84197- if (atomic_read(&flow_cache_genid) != fle->genid)
84198+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
84199 return 0;
84200 if (fle->object && !fle->object->ops->check(fle->object))
84201 return 0;
84202@@ -259,7 +259,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
84203 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
84204 fcp->hash_count++;
84205 }
84206- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
84207+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
84208 flo = fle->object;
84209 if (!flo)
84210 goto ret_object;
84211@@ -280,7 +280,7 @@ nocache:
84212 }
84213 flo = resolver(net, key, family, dir, flo, ctx);
84214 if (fle) {
84215- fle->genid = atomic_read(&flow_cache_genid);
84216+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
84217 if (!IS_ERR(flo))
84218 fle->object = flo;
84219 else
84220diff --git a/net/core/iovec.c b/net/core/iovec.c
84221index 7e7aeb0..2a998cb 100644
84222--- a/net/core/iovec.c
84223+++ b/net/core/iovec.c
84224@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
84225 if (m->msg_namelen) {
84226 if (mode == VERIFY_READ) {
84227 void __user *namep;
84228- namep = (void __user __force *) m->msg_name;
84229+ namep = (void __force_user *) m->msg_name;
84230 err = move_addr_to_kernel(namep, m->msg_namelen,
84231 address);
84232 if (err < 0)
84233@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
84234 }
84235
84236 size = m->msg_iovlen * sizeof(struct iovec);
84237- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
84238+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
84239 return -EFAULT;
84240
84241 m->msg_iov = iov;
84242diff --git a/net/core/neighbour.c b/net/core/neighbour.c
84243index c815f28..e6403f2 100644
84244--- a/net/core/neighbour.c
84245+++ b/net/core/neighbour.c
84246@@ -2776,7 +2776,7 @@ static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
84247 size_t *lenp, loff_t *ppos)
84248 {
84249 int size, ret;
84250- ctl_table tmp = *ctl;
84251+ ctl_table_no_const tmp = *ctl;
84252
84253 tmp.extra1 = &zero;
84254 tmp.extra2 = &unres_qlen_max;
84255diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
84256index 28c5f5a..7edf2e2 100644
84257--- a/net/core/net-sysfs.c
84258+++ b/net/core/net-sysfs.c
84259@@ -1455,7 +1455,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
84260 }
84261 EXPORT_SYMBOL(netdev_class_remove_file);
84262
84263-int netdev_kobject_init(void)
84264+int __init netdev_kobject_init(void)
84265 {
84266 kobj_ns_type_register(&net_ns_type_operations);
84267 return class_register(&net_class);
84268diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
84269index 8acce01..2e306bb 100644
84270--- a/net/core/net_namespace.c
84271+++ b/net/core/net_namespace.c
84272@@ -442,7 +442,7 @@ static int __register_pernet_operations(struct list_head *list,
84273 int error;
84274 LIST_HEAD(net_exit_list);
84275
84276- list_add_tail(&ops->list, list);
84277+ pax_list_add_tail((struct list_head *)&ops->list, list);
84278 if (ops->init || (ops->id && ops->size)) {
84279 for_each_net(net) {
84280 error = ops_init(ops, net);
84281@@ -455,7 +455,7 @@ static int __register_pernet_operations(struct list_head *list,
84282
84283 out_undo:
84284 /* If I have an error cleanup all namespaces I initialized */
84285- list_del(&ops->list);
84286+ pax_list_del((struct list_head *)&ops->list);
84287 ops_exit_list(ops, &net_exit_list);
84288 ops_free_list(ops, &net_exit_list);
84289 return error;
84290@@ -466,7 +466,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
84291 struct net *net;
84292 LIST_HEAD(net_exit_list);
84293
84294- list_del(&ops->list);
84295+ pax_list_del((struct list_head *)&ops->list);
84296 for_each_net(net)
84297 list_add_tail(&net->exit_list, &net_exit_list);
84298 ops_exit_list(ops, &net_exit_list);
84299@@ -600,7 +600,7 @@ int register_pernet_device(struct pernet_operations *ops)
84300 mutex_lock(&net_mutex);
84301 error = register_pernet_operations(&pernet_list, ops);
84302 if (!error && (first_device == &pernet_list))
84303- first_device = &ops->list;
84304+ first_device = (struct list_head *)&ops->list;
84305 mutex_unlock(&net_mutex);
84306 return error;
84307 }
84308diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
84309index 1868625..e2261f5 100644
84310--- a/net/core/rtnetlink.c
84311+++ b/net/core/rtnetlink.c
84312@@ -58,7 +58,7 @@ struct rtnl_link {
84313 rtnl_doit_func doit;
84314 rtnl_dumpit_func dumpit;
84315 rtnl_calcit_func calcit;
84316-};
84317+} __no_const;
84318
84319 static DEFINE_MUTEX(rtnl_mutex);
84320
84321@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
84322 if (rtnl_link_ops_get(ops->kind))
84323 return -EEXIST;
84324
84325- if (!ops->dellink)
84326- ops->dellink = unregister_netdevice_queue;
84327+ if (!ops->dellink) {
84328+ pax_open_kernel();
84329+ *(void **)&ops->dellink = unregister_netdevice_queue;
84330+ pax_close_kernel();
84331+ }
84332
84333- list_add_tail(&ops->list, &link_ops);
84334+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
84335 return 0;
84336 }
84337 EXPORT_SYMBOL_GPL(__rtnl_link_register);
84338@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
84339 for_each_net(net) {
84340 __rtnl_kill_links(net, ops);
84341 }
84342- list_del(&ops->list);
84343+ pax_list_del((struct list_head *)&ops->list);
84344 }
84345 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
84346
84347@@ -976,6 +979,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
84348 * report anything.
84349 */
84350 ivi.spoofchk = -1;
84351+ memset(ivi.mac, 0, sizeof(ivi.mac));
84352 if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
84353 break;
84354 vf_mac.vf =
84355diff --git a/net/core/scm.c b/net/core/scm.c
84356index 905dcc6..14ee2d6 100644
84357--- a/net/core/scm.c
84358+++ b/net/core/scm.c
84359@@ -224,7 +224,7 @@ EXPORT_SYMBOL(__scm_send);
84360 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
84361 {
84362 struct cmsghdr __user *cm
84363- = (__force struct cmsghdr __user *)msg->msg_control;
84364+ = (struct cmsghdr __force_user *)msg->msg_control;
84365 struct cmsghdr cmhdr;
84366 int cmlen = CMSG_LEN(len);
84367 int err;
84368@@ -247,7 +247,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
84369 err = -EFAULT;
84370 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
84371 goto out;
84372- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
84373+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
84374 goto out;
84375 cmlen = CMSG_SPACE(len);
84376 if (msg->msg_controllen < cmlen)
84377@@ -263,7 +263,7 @@ EXPORT_SYMBOL(put_cmsg);
84378 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
84379 {
84380 struct cmsghdr __user *cm
84381- = (__force struct cmsghdr __user*)msg->msg_control;
84382+ = (struct cmsghdr __force_user *)msg->msg_control;
84383
84384 int fdmax = 0;
84385 int fdnum = scm->fp->count;
84386@@ -283,7 +283,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
84387 if (fdnum < fdmax)
84388 fdmax = fdnum;
84389
84390- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
84391+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
84392 i++, cmfptr++)
84393 {
84394 struct socket *sock;
84395diff --git a/net/core/sock.c b/net/core/sock.c
84396index bc131d4..029e378 100644
84397--- a/net/core/sock.c
84398+++ b/net/core/sock.c
84399@@ -388,7 +388,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
84400 struct sk_buff_head *list = &sk->sk_receive_queue;
84401
84402 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
84403- atomic_inc(&sk->sk_drops);
84404+ atomic_inc_unchecked(&sk->sk_drops);
84405 trace_sock_rcvqueue_full(sk, skb);
84406 return -ENOMEM;
84407 }
84408@@ -398,7 +398,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
84409 return err;
84410
84411 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
84412- atomic_inc(&sk->sk_drops);
84413+ atomic_inc_unchecked(&sk->sk_drops);
84414 return -ENOBUFS;
84415 }
84416
84417@@ -418,7 +418,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
84418 skb_dst_force(skb);
84419
84420 spin_lock_irqsave(&list->lock, flags);
84421- skb->dropcount = atomic_read(&sk->sk_drops);
84422+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
84423 __skb_queue_tail(list, skb);
84424 spin_unlock_irqrestore(&list->lock, flags);
84425
84426@@ -438,7 +438,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
84427 skb->dev = NULL;
84428
84429 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
84430- atomic_inc(&sk->sk_drops);
84431+ atomic_inc_unchecked(&sk->sk_drops);
84432 goto discard_and_relse;
84433 }
84434 if (nested)
84435@@ -456,7 +456,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
84436 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
84437 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
84438 bh_unlock_sock(sk);
84439- atomic_inc(&sk->sk_drops);
84440+ atomic_inc_unchecked(&sk->sk_drops);
84441 goto discard_and_relse;
84442 }
84443
84444@@ -930,12 +930,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
84445 struct timeval tm;
84446 } v;
84447
84448- int lv = sizeof(int);
84449- int len;
84450+ unsigned int lv = sizeof(int);
84451+ unsigned int len;
84452
84453 if (get_user(len, optlen))
84454 return -EFAULT;
84455- if (len < 0)
84456+ if (len > INT_MAX)
84457 return -EINVAL;
84458
84459 memset(&v, 0, sizeof(v));
84460@@ -1083,11 +1083,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
84461
84462 case SO_PEERNAME:
84463 {
84464- char address[128];
84465+ char address[_K_SS_MAXSIZE];
84466
84467 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
84468 return -ENOTCONN;
84469- if (lv < len)
84470+ if (lv < len || sizeof address < len)
84471 return -EINVAL;
84472 if (copy_to_user(optval, address, len))
84473 return -EFAULT;
84474@@ -1146,7 +1146,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
84475
84476 if (len > lv)
84477 len = lv;
84478- if (copy_to_user(optval, &v, len))
84479+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
84480 return -EFAULT;
84481 lenout:
84482 if (put_user(len, optlen))
84483@@ -2276,7 +2276,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
84484 */
84485 smp_wmb();
84486 atomic_set(&sk->sk_refcnt, 1);
84487- atomic_set(&sk->sk_drops, 0);
84488+ atomic_set_unchecked(&sk->sk_drops, 0);
84489 }
84490 EXPORT_SYMBOL(sock_init_data);
84491
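
Note on the sock_getsockopt() hunks: two guards are added. The user length is held as unsigned and range-checked against INT_MAX (equivalent to the old negative test, without later signed/unsigned ambiguity), and the final copy is refused outright when len exceeds the kernel-side value's size. Both checks in a standalone sketch:

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        union { int val; char buf[128]; } v = { .val = 42 };
        unsigned int len = (unsigned int)-4; /* was: a negative int */

        if (len > INT_MAX) {          /* replaces the old `len < 0` test */
            puts("EINVAL: length out of range");
            return 1;
        }
        if (len > sizeof(v)) {        /* the added clamp before copying */
            puts("EFAULT-style reject: len > sizeof(v)");
            return 1;
        }
        printf("ok to copy %u bytes of v (val=%d)\n", len, v.val);
        return 0;
    }
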
84492diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
84493index 750f44f..922399c 100644
84494--- a/net/core/sock_diag.c
84495+++ b/net/core/sock_diag.c
84496@@ -9,26 +9,33 @@
84497 #include <linux/inet_diag.h>
84498 #include <linux/sock_diag.h>
84499
84500-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
84501+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
84502 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
84503 static DEFINE_MUTEX(sock_diag_table_mutex);
84504
84505 int sock_diag_check_cookie(void *sk, __u32 *cookie)
84506 {
84507+#ifndef CONFIG_GRKERNSEC_HIDESYM
84508 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
84509 cookie[1] != INET_DIAG_NOCOOKIE) &&
84510 ((u32)(unsigned long)sk != cookie[0] ||
84511 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
84512 return -ESTALE;
84513 else
84514+#endif
84515 return 0;
84516 }
84517 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
84518
84519 void sock_diag_save_cookie(void *sk, __u32 *cookie)
84520 {
84521+#ifdef CONFIG_GRKERNSEC_HIDESYM
84522+ cookie[0] = 0;
84523+ cookie[1] = 0;
84524+#else
84525 cookie[0] = (u32)(unsigned long)sk;
84526 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
84527+#endif
84528 }
84529 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
84530
84531@@ -75,8 +82,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
84532 mutex_lock(&sock_diag_table_mutex);
84533 if (sock_diag_handlers[hndl->family])
84534 err = -EBUSY;
84535- else
84536+ else {
84537+ pax_open_kernel();
84538 sock_diag_handlers[hndl->family] = hndl;
84539+ pax_close_kernel();
84540+ }
84541 mutex_unlock(&sock_diag_table_mutex);
84542
84543 return err;
84544@@ -92,26 +102,13 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
84545
84546 mutex_lock(&sock_diag_table_mutex);
84547 BUG_ON(sock_diag_handlers[family] != hnld);
84548+ pax_open_kernel();
84549 sock_diag_handlers[family] = NULL;
84550+ pax_close_kernel();
84551 mutex_unlock(&sock_diag_table_mutex);
84552 }
84553 EXPORT_SYMBOL_GPL(sock_diag_unregister);
84554
84555-static const inline struct sock_diag_handler *sock_diag_lock_handler(int family)
84556-{
84557- if (sock_diag_handlers[family] == NULL)
84558- request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
84559- NETLINK_SOCK_DIAG, family);
84560-
84561- mutex_lock(&sock_diag_table_mutex);
84562- return sock_diag_handlers[family];
84563-}
84564-
84565-static inline void sock_diag_unlock_handler(const struct sock_diag_handler *h)
84566-{
84567- mutex_unlock(&sock_diag_table_mutex);
84568-}
84569-
84570 static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
84571 {
84572 int err;
84573@@ -124,12 +121,17 @@ static int __sock_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
84574 if (req->sdiag_family >= AF_MAX)
84575 return -EINVAL;
84576
84577- hndl = sock_diag_lock_handler(req->sdiag_family);
84578+ if (sock_diag_handlers[req->sdiag_family] == NULL)
84579+ request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK,
84580+ NETLINK_SOCK_DIAG, req->sdiag_family);
84581+
84582+ mutex_lock(&sock_diag_table_mutex);
84583+ hndl = sock_diag_handlers[req->sdiag_family];
84584 if (hndl == NULL)
84585 err = -ENOENT;
84586 else
84587 err = hndl->dump(skb, nlh);
84588- sock_diag_unlock_handler(hndl);
84589+ mutex_unlock(&sock_diag_table_mutex);
84590
84591 return err;
84592 }
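Two things happen in the sock_diag.c hunks above: the handler table becomes __read_only with writes bracketed by pax_open_kernel()/pax_close_kernel(), and the lock/unlock helper pair is folded into __sock_diag_rcv_msg() so the handler pointer is only ever read under sock_diag_table_mutex. A rough userspace analog of the registration side, using pthreads; FAMILY_MAX and the struct layout are illustrative:

    #include <errno.h>
    #include <pthread.h>

    #define FAMILY_MAX 46   /* stands in for AF_MAX; value illustrative */

    struct diag_handler { int family; int (*dump)(void *req); };

    static const struct diag_handler *handlers[FAMILY_MAX];
    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Mirror sock_diag_register(): one writer at a time, -EBUSY on a taken
     * slot. The kernel version additionally brackets the store with
     * pax_open_kernel()/pax_close_kernel() since the table is __read_only. */
    static int diag_register(const struct diag_handler *h)
    {
        int err = 0;
        pthread_mutex_lock(&table_lock);
        if (handlers[h->family])
            err = -EBUSY;
        else
            handlers[h->family] = h;
        pthread_mutex_unlock(&table_lock);
        return err;
    }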
84593diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
84594index d1b0804..4aed0a5 100644
84595--- a/net/core/sysctl_net_core.c
84596+++ b/net/core/sysctl_net_core.c
84597@@ -26,7 +26,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
84598 {
84599 unsigned int orig_size, size;
84600 int ret, i;
84601- ctl_table tmp = {
84602+ ctl_table_no_const tmp = {
84603 .data = &size,
84604 .maxlen = sizeof(size),
84605 .mode = table->mode
84606@@ -205,13 +205,12 @@ static struct ctl_table netns_core_table[] = {
84607
84608 static __net_init int sysctl_core_net_init(struct net *net)
84609 {
84610- struct ctl_table *tbl;
84611+ ctl_table_no_const *tbl = NULL;
84612
84613 net->core.sysctl_somaxconn = SOMAXCONN;
84614
84615- tbl = netns_core_table;
84616 if (!net_eq(net, &init_net)) {
84617- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
84618+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
84619 if (tbl == NULL)
84620 goto err_dup;
84621
84622@@ -221,16 +220,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
84623 if (net->user_ns != &init_user_ns) {
84624 tbl[0].procname = NULL;
84625 }
84626- }
84627-
84628- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
84629+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
84630+ } else
84631+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
84632 if (net->core.sysctl_hdr == NULL)
84633 goto err_reg;
84634
84635 return 0;
84636
84637 err_reg:
84638- if (tbl != netns_core_table)
84639+ if (tbl)
84640 kfree(tbl);
84641 err_dup:
84642 return -ENOMEM;
84643@@ -246,7 +245,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
84644 kfree(tbl);
84645 }
84646
84647-static __net_initdata struct pernet_operations sysctl_core_ops = {
84648+static __net_initconst struct pernet_operations sysctl_core_ops = {
84649 .init = sysctl_core_net_init,
84650 .exit = sysctl_core_net_exit,
84651 };
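This is the first of many hunks that retype the mutable table pointer as ctl_table_no_const and kmemdup() the template only for non-init namespaces, while init_net registers the const template directly; the error path then reduces to a plain kfree(tbl), safe because tbl starts out NULL. A compact userspace sketch of the duplicate-then-patch idea (struct and names hypothetical):

    #include <stdlib.h>
    #include <string.h>

    struct entry { const char *name; int *data; };
    static const struct entry template_tbl[] = { { "somaxconn", 0 }, { 0, 0 } };

    /* Duplicate the const template; only the per-namespace copy is written. */
    static struct entry *dup_for_ns(int *per_ns_value)
    {
        struct entry *t = malloc(sizeof(template_tbl));
        if (t) {
            memcpy(t, template_tbl, sizeof(template_tbl));
            t[0].data = per_ns_value;  /* point the copy at this ns's data */
        }
        return t;
    }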
84652diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
84653index 1b588e2..21291f1 100644
84654--- a/net/dcb/dcbnl.c
84655+++ b/net/dcb/dcbnl.c
84656@@ -284,6 +284,7 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,
84657 if (!netdev->dcbnl_ops->getpermhwaddr)
84658 return -EOPNOTSUPP;
84659
84660+ memset(perm_addr, 0, sizeof(perm_addr));
84661 netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);
84662
84663 return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr);
84664@@ -1042,6 +1043,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
84665
84666 if (ops->ieee_getets) {
84667 struct ieee_ets ets;
84668+ memset(&ets, 0, sizeof(ets));
84669 err = ops->ieee_getets(netdev, &ets);
84670 if (!err &&
84671 nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets))
84672@@ -1050,6 +1052,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
84673
84674 if (ops->ieee_getmaxrate) {
84675 struct ieee_maxrate maxrate;
84676+ memset(&maxrate, 0, sizeof(maxrate));
84677 err = ops->ieee_getmaxrate(netdev, &maxrate);
84678 if (!err) {
84679 err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE,
84680@@ -1061,6 +1064,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
84681
84682 if (ops->ieee_getpfc) {
84683 struct ieee_pfc pfc;
84684+ memset(&pfc, 0, sizeof(pfc));
84685 err = ops->ieee_getpfc(netdev, &pfc);
84686 if (!err &&
84687 nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc))
84688@@ -1094,6 +1098,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
84689 /* get peer info if available */
84690 if (ops->ieee_peer_getets) {
84691 struct ieee_ets ets;
84692+ memset(&ets, 0, sizeof(ets));
84693 err = ops->ieee_peer_getets(netdev, &ets);
84694 if (!err &&
84695 nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets))
84696@@ -1102,6 +1107,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
84697
84698 if (ops->ieee_peer_getpfc) {
84699 struct ieee_pfc pfc;
84700+ memset(&pfc, 0, sizeof(pfc));
84701 err = ops->ieee_peer_getpfc(netdev, &pfc);
84702 if (!err &&
84703 nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc))
84704@@ -1280,6 +1286,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
84705 /* peer info if available */
84706 if (ops->cee_peer_getpg) {
84707 struct cee_pg pg;
84708+ memset(&pg, 0, sizeof(pg));
84709 err = ops->cee_peer_getpg(netdev, &pg);
84710 if (!err &&
84711 nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg))
84712@@ -1288,6 +1295,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
84713
84714 if (ops->cee_peer_getpfc) {
84715 struct cee_pfc pfc;
84716+ memset(&pfc, 0, sizeof(pfc));
84717 err = ops->cee_peer_getpfc(netdev, &pfc);
84718 if (!err &&
84719 nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc))
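Each dcbnl hunk above zeroes the stack struct before handing it to the driver getter; a driver that fills the struct only partially would otherwise let nla_put() ship leftover kernel stack bytes to userspace. A small sketch of the pattern (types hypothetical):

    #include <string.h>

    struct ets_like { unsigned char willing; unsigned char tc_tx_bw[8]; };

    /* A driver getter may legitimately fill only some fields. */
    static void driver_getets(struct ets_like *ets) { ets->willing = 1; }

    static void query_ets(struct ets_like *out)
    {
        memset(out, 0, sizeof(*out)); /* zero first: no stack junk survives */
        driver_getets(out);           /* whatever it skips is now 0 */
    }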
84720diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
84721index 307c322..78a4c6f 100644
84722--- a/net/decnet/af_decnet.c
84723+++ b/net/decnet/af_decnet.c
84724@@ -468,6 +468,7 @@ static struct proto dn_proto = {
84725 .sysctl_rmem = sysctl_decnet_rmem,
84726 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
84727 .obj_size = sizeof(struct dn_sock),
84728+ .slab_flags = SLAB_USERCOPY,
84729 };
84730
84731 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
84732diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
84733index a55eecc..dd8428c 100644
84734--- a/net/decnet/sysctl_net_decnet.c
84735+++ b/net/decnet/sysctl_net_decnet.c
84736@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
84737
84738 if (len > *lenp) len = *lenp;
84739
84740- if (copy_to_user(buffer, addr, len))
84741+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
84742 return -EFAULT;
84743
84744 *lenp = len;
84745@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
84746
84747 if (len > *lenp) len = *lenp;
84748
84749- if (copy_to_user(buffer, devname, len))
84750+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
84751 return -EFAULT;
84752
84753 *lenp = len;
84754diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
84755index fcf104e..95552d4 100644
84756--- a/net/ipv4/af_inet.c
84757+++ b/net/ipv4/af_inet.c
84758@@ -1717,13 +1717,9 @@ static int __init inet_init(void)
84759
84760 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > sizeof(dummy_skb->cb));
84761
84762- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
84763- if (!sysctl_local_reserved_ports)
84764- goto out;
84765-
84766 rc = proto_register(&tcp_prot, 1);
84767 if (rc)
84768- goto out_free_reserved_ports;
84769+ goto out;
84770
84771 rc = proto_register(&udp_prot, 1);
84772 if (rc)
84773@@ -1832,8 +1828,6 @@ out_unregister_udp_proto:
84774 proto_unregister(&udp_prot);
84775 out_unregister_tcp_proto:
84776 proto_unregister(&tcp_prot);
84777-out_free_reserved_ports:
84778- kfree(sysctl_local_reserved_ports);
84779 goto out;
84780 }
84781
84782diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
84783index a69b4e4..dbccba5 100644
84784--- a/net/ipv4/ah4.c
84785+++ b/net/ipv4/ah4.c
84786@@ -421,7 +421,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
84787 return;
84788
84789 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
84790- atomic_inc(&flow_cache_genid);
84791+ atomic_inc_unchecked(&flow_cache_genid);
84792 rt_genid_bump(net);
84793
84794 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
84795diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
84796index a8e4f26..25e5f40 100644
84797--- a/net/ipv4/devinet.c
84798+++ b/net/ipv4/devinet.c
84799@@ -1763,7 +1763,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
84800 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
84801 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
84802
84803-static struct devinet_sysctl_table {
84804+static const struct devinet_sysctl_table {
84805 struct ctl_table_header *sysctl_header;
84806 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
84807 } devinet_sysctl = {
84808@@ -1881,7 +1881,7 @@ static __net_init int devinet_init_net(struct net *net)
84809 int err;
84810 struct ipv4_devconf *all, *dflt;
84811 #ifdef CONFIG_SYSCTL
84812- struct ctl_table *tbl = ctl_forward_entry;
84813+ ctl_table_no_const *tbl = NULL;
84814 struct ctl_table_header *forw_hdr;
84815 #endif
84816
84817@@ -1899,7 +1899,7 @@ static __net_init int devinet_init_net(struct net *net)
84818 goto err_alloc_dflt;
84819
84820 #ifdef CONFIG_SYSCTL
84821- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
84822+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
84823 if (tbl == NULL)
84824 goto err_alloc_ctl;
84825
84826@@ -1919,7 +1919,10 @@ static __net_init int devinet_init_net(struct net *net)
84827 goto err_reg_dflt;
84828
84829 err = -ENOMEM;
84830- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
84831+ if (!net_eq(net, &init_net))
84832+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
84833+ else
84834+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
84835 if (forw_hdr == NULL)
84836 goto err_reg_ctl;
84837 net->ipv4.forw_hdr = forw_hdr;
84838@@ -1935,8 +1938,7 @@ err_reg_ctl:
84839 err_reg_dflt:
84840 __devinet_sysctl_unregister(all);
84841 err_reg_all:
84842- if (tbl != ctl_forward_entry)
84843- kfree(tbl);
84844+ kfree(tbl);
84845 err_alloc_ctl:
84846 #endif
84847 if (dflt != &ipv4_devconf_dflt)
84848diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
84849index 3b4f0cd..8cb864c 100644
84850--- a/net/ipv4/esp4.c
84851+++ b/net/ipv4/esp4.c
84852@@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
84853 return;
84854
84855 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
84856- atomic_inc(&flow_cache_genid);
84857+ atomic_inc_unchecked(&flow_cache_genid);
84858 rt_genid_bump(net);
84859
84860 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
84861diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
84862index 5cd75e2..f57ef39 100644
84863--- a/net/ipv4/fib_frontend.c
84864+++ b/net/ipv4/fib_frontend.c
84865@@ -1020,12 +1020,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
84866 #ifdef CONFIG_IP_ROUTE_MULTIPATH
84867 fib_sync_up(dev);
84868 #endif
84869- atomic_inc(&net->ipv4.dev_addr_genid);
84870+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
84871 rt_cache_flush(dev_net(dev));
84872 break;
84873 case NETDEV_DOWN:
84874 fib_del_ifaddr(ifa, NULL);
84875- atomic_inc(&net->ipv4.dev_addr_genid);
84876+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
84877 if (ifa->ifa_dev->ifa_list == NULL) {
84878 /* Last address was deleted from this interface.
84879 * Disable IP.
84880@@ -1061,7 +1061,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
84881 #ifdef CONFIG_IP_ROUTE_MULTIPATH
84882 fib_sync_up(dev);
84883 #endif
84884- atomic_inc(&net->ipv4.dev_addr_genid);
84885+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
84886 rt_cache_flush(net);
84887 break;
84888 case NETDEV_DOWN:
84889diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
84890index 4797a80..2bd54e9 100644
84891--- a/net/ipv4/fib_semantics.c
84892+++ b/net/ipv4/fib_semantics.c
84893@@ -767,7 +767,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
84894 nh->nh_saddr = inet_select_addr(nh->nh_dev,
84895 nh->nh_gw,
84896 nh->nh_parent->fib_scope);
84897- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
84898+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
84899
84900 return nh->nh_saddr;
84901 }
84902diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
84903index d0670f0..744ac80 100644
84904--- a/net/ipv4/inet_connection_sock.c
84905+++ b/net/ipv4/inet_connection_sock.c
84906@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
84907 .range = { 32768, 61000 },
84908 };
84909
84910-unsigned long *sysctl_local_reserved_ports;
84911+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
84912 EXPORT_SYMBOL(sysctl_local_reserved_ports);
84913
84914 void inet_get_local_port_range(int *low, int *high)
84915diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
84916index fa3ae81..0dbe6b8 100644
84917--- a/net/ipv4/inet_hashtables.c
84918+++ b/net/ipv4/inet_hashtables.c
84919@@ -18,12 +18,15 @@
84920 #include <linux/sched.h>
84921 #include <linux/slab.h>
84922 #include <linux/wait.h>
84923+#include <linux/security.h>
84924
84925 #include <net/inet_connection_sock.h>
84926 #include <net/inet_hashtables.h>
84927 #include <net/secure_seq.h>
84928 #include <net/ip.h>
84929
84930+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
84931+
84932 /*
84933 * Allocate and initialize a new local port bind bucket.
84934 * The bindhash mutex for snum's hash chain must be held here.
84935@@ -540,6 +543,8 @@ ok:
84936 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
84937 spin_unlock(&head->lock);
84938
84939+ gr_update_task_in_ip_table(current, inet_sk(sk));
84940+
84941 if (tw) {
84942 inet_twsk_deschedule(tw, death_row);
84943 while (twrefcnt) {
84944diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
84945index 000e3d2..5472da3 100644
84946--- a/net/ipv4/inetpeer.c
84947+++ b/net/ipv4/inetpeer.c
84948@@ -503,8 +503,8 @@ relookup:
84949 if (p) {
84950 p->daddr = *daddr;
84951 atomic_set(&p->refcnt, 1);
84952- atomic_set(&p->rid, 0);
84953- atomic_set(&p->ip_id_count,
84954+ atomic_set_unchecked(&p->rid, 0);
84955+ atomic_set_unchecked(&p->ip_id_count,
84956 (daddr->family == AF_INET) ?
84957 secure_ip_id(daddr->addr.a4) :
84958 secure_ipv6_id(daddr->addr.a6));
84959diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
84960index eb9d63a..31c5372 100644
84961--- a/net/ipv4/ip_fragment.c
84962+++ b/net/ipv4/ip_fragment.c
84963@@ -322,7 +322,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
84964 return 0;
84965
84966 start = qp->rid;
84967- end = atomic_inc_return(&peer->rid);
84968+ end = atomic_inc_return_unchecked(&peer->rid);
84969 qp->rid = end;
84970
84971 rc = qp->q.fragments && (end - start) > max;
84972@@ -789,12 +789,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
84973
84974 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
84975 {
84976- struct ctl_table *table;
84977+ ctl_table_no_const *table = NULL;
84978 struct ctl_table_header *hdr;
84979
84980- table = ip4_frags_ns_ctl_table;
84981 if (!net_eq(net, &init_net)) {
84982- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
84983+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
84984 if (table == NULL)
84985 goto err_alloc;
84986
84987@@ -805,9 +804,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
84988 /* Don't export sysctls to unprivileged users */
84989 if (net->user_ns != &init_user_ns)
84990 table[0].procname = NULL;
84991- }
84992+ hdr = register_net_sysctl(net, "net/ipv4", table);
84993+ } else
84994+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
84995
84996- hdr = register_net_sysctl(net, "net/ipv4", table);
84997 if (hdr == NULL)
84998 goto err_reg;
84999
85000@@ -815,8 +815,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
85001 return 0;
85002
85003 err_reg:
85004- if (!net_eq(net, &init_net))
85005- kfree(table);
85006+ kfree(table);
85007 err_alloc:
85008 return -ENOMEM;
85009 }
85010diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
85011index e81b1ca..6f3b5b9 100644
85012--- a/net/ipv4/ip_gre.c
85013+++ b/net/ipv4/ip_gre.c
85014@@ -124,7 +124,7 @@ static bool log_ecn_error = true;
85015 module_param(log_ecn_error, bool, 0644);
85016 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
85017
85018-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
85019+static struct rtnl_link_ops ipgre_link_ops;
85020 static int ipgre_tunnel_init(struct net_device *dev);
85021 static void ipgre_tunnel_setup(struct net_device *dev);
85022 static int ipgre_tunnel_bind_dev(struct net_device *dev);
85023@@ -1756,7 +1756,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
85024 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
85025 };
85026
85027-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
85028+static struct rtnl_link_ops ipgre_link_ops = {
85029 .kind = "gre",
85030 .maxtype = IFLA_GRE_MAX,
85031 .policy = ipgre_policy,
85032@@ -1769,7 +1769,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
85033 .fill_info = ipgre_fill_info,
85034 };
85035
85036-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
85037+static struct rtnl_link_ops ipgre_tap_ops = {
85038 .kind = "gretap",
85039 .maxtype = IFLA_GRE_MAX,
85040 .policy = ipgre_policy,
85041diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
85042index d9c4f11..02b82db 100644
85043--- a/net/ipv4/ip_sockglue.c
85044+++ b/net/ipv4/ip_sockglue.c
85045@@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
85046 len = min_t(unsigned int, len, opt->optlen);
85047 if (put_user(len, optlen))
85048 return -EFAULT;
85049- if (copy_to_user(optval, opt->__data, len))
85050+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
85051+ copy_to_user(optval, opt->__data, len))
85052 return -EFAULT;
85053 return 0;
85054 }
85055@@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
85056 if (sk->sk_type != SOCK_STREAM)
85057 return -ENOPROTOOPT;
85058
85059- msg.msg_control = optval;
85060+ msg.msg_control = (void __force_kernel *)optval;
85061 msg.msg_controllen = len;
85062 msg.msg_flags = flags;
85063
85064diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
85065index c3a4233..1412161 100644
85066--- a/net/ipv4/ip_vti.c
85067+++ b/net/ipv4/ip_vti.c
85068@@ -47,7 +47,7 @@
85069 #define HASH_SIZE 16
85070 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
85071
85072-static struct rtnl_link_ops vti_link_ops __read_mostly;
85073+static struct rtnl_link_ops vti_link_ops;
85074
85075 static int vti_net_id __read_mostly;
85076 struct vti_net {
85077@@ -886,7 +886,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
85078 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
85079 };
85080
85081-static struct rtnl_link_ops vti_link_ops __read_mostly = {
85082+static struct rtnl_link_ops vti_link_ops = {
85083 .kind = "vti",
85084 .maxtype = IFLA_VTI_MAX,
85085 .policy = vti_policy,
85086diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
85087index 9a46dae..5f793a0 100644
85088--- a/net/ipv4/ipcomp.c
85089+++ b/net/ipv4/ipcomp.c
85090@@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
85091 return;
85092
85093 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
85094- atomic_inc(&flow_cache_genid);
85095+ atomic_inc_unchecked(&flow_cache_genid);
85096 rt_genid_bump(net);
85097
85098 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
85099diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
85100index a2e50ae..e152b7c 100644
85101--- a/net/ipv4/ipconfig.c
85102+++ b/net/ipv4/ipconfig.c
85103@@ -323,7 +323,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
85104
85105 mm_segment_t oldfs = get_fs();
85106 set_fs(get_ds());
85107- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
85108+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
85109 set_fs(oldfs);
85110 return res;
85111 }
85112@@ -334,7 +334,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
85113
85114 mm_segment_t oldfs = get_fs();
85115 set_fs(get_ds());
85116- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
85117+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
85118 set_fs(oldfs);
85119 return res;
85120 }
85121@@ -345,7 +345,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
85122
85123 mm_segment_t oldfs = get_fs();
85124 set_fs(get_ds());
85125- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
85126+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
85127 set_fs(oldfs);
85128 return res;
85129 }
85130diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
85131index 191fc24..1b3b804 100644
85132--- a/net/ipv4/ipip.c
85133+++ b/net/ipv4/ipip.c
85134@@ -138,7 +138,7 @@ struct ipip_net {
85135 static int ipip_tunnel_init(struct net_device *dev);
85136 static void ipip_tunnel_setup(struct net_device *dev);
85137 static void ipip_dev_free(struct net_device *dev);
85138-static struct rtnl_link_ops ipip_link_ops __read_mostly;
85139+static struct rtnl_link_ops ipip_link_ops;
85140
85141 static struct rtnl_link_stats64 *ipip_get_stats64(struct net_device *dev,
85142 struct rtnl_link_stats64 *tot)
85143@@ -972,7 +972,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
85144 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
85145 };
85146
85147-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
85148+static struct rtnl_link_ops ipip_link_ops = {
85149 .kind = "ipip",
85150 .maxtype = IFLA_IPTUN_MAX,
85151 .policy = ipip_policy,
85152diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
85153index 3ea4127..849297b 100644
85154--- a/net/ipv4/netfilter/arp_tables.c
85155+++ b/net/ipv4/netfilter/arp_tables.c
85156@@ -879,14 +879,14 @@ static int compat_table_info(const struct xt_table_info *info,
85157 #endif
85158
85159 static int get_info(struct net *net, void __user *user,
85160- const int *len, int compat)
85161+ int len, int compat)
85162 {
85163 char name[XT_TABLE_MAXNAMELEN];
85164 struct xt_table *t;
85165 int ret;
85166
85167- if (*len != sizeof(struct arpt_getinfo)) {
85168- duprintf("length %u != %Zu\n", *len,
85169+ if (len != sizeof(struct arpt_getinfo)) {
85170+ duprintf("length %u != %Zu\n", len,
85171 sizeof(struct arpt_getinfo));
85172 return -EINVAL;
85173 }
85174@@ -923,7 +923,7 @@ static int get_info(struct net *net, void __user *user,
85175 info.size = private->size;
85176 strcpy(info.name, name);
85177
85178- if (copy_to_user(user, &info, *len) != 0)
85179+ if (copy_to_user(user, &info, len) != 0)
85180 ret = -EFAULT;
85181 else
85182 ret = 0;
85183@@ -1682,7 +1682,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
85184
85185 switch (cmd) {
85186 case ARPT_SO_GET_INFO:
85187- ret = get_info(sock_net(sk), user, len, 1);
85188+ ret = get_info(sock_net(sk), user, *len, 1);
85189 break;
85190 case ARPT_SO_GET_ENTRIES:
85191 ret = compat_get_entries(sock_net(sk), user, len);
85192@@ -1727,7 +1727,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
85193
85194 switch (cmd) {
85195 case ARPT_SO_GET_INFO:
85196- ret = get_info(sock_net(sk), user, len, 0);
85197+ ret = get_info(sock_net(sk), user, *len, 0);
85198 break;
85199
85200 case ARPT_SO_GET_ENTRIES:
85201diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
85202index 17c5e06..1b91206 100644
85203--- a/net/ipv4/netfilter/ip_tables.c
85204+++ b/net/ipv4/netfilter/ip_tables.c
85205@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
85206 #endif
85207
85208 static int get_info(struct net *net, void __user *user,
85209- const int *len, int compat)
85210+ int len, int compat)
85211 {
85212 char name[XT_TABLE_MAXNAMELEN];
85213 struct xt_table *t;
85214 int ret;
85215
85216- if (*len != sizeof(struct ipt_getinfo)) {
85217- duprintf("length %u != %zu\n", *len,
85218+ if (len != sizeof(struct ipt_getinfo)) {
85219+ duprintf("length %u != %zu\n", len,
85220 sizeof(struct ipt_getinfo));
85221 return -EINVAL;
85222 }
85223@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
85224 info.size = private->size;
85225 strcpy(info.name, name);
85226
85227- if (copy_to_user(user, &info, *len) != 0)
85228+ if (copy_to_user(user, &info, len) != 0)
85229 ret = -EFAULT;
85230 else
85231 ret = 0;
85232@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
85233
85234 switch (cmd) {
85235 case IPT_SO_GET_INFO:
85236- ret = get_info(sock_net(sk), user, len, 1);
85237+ ret = get_info(sock_net(sk), user, *len, 1);
85238 break;
85239 case IPT_SO_GET_ENTRIES:
85240 ret = compat_get_entries(sock_net(sk), user, len);
85241@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
85242
85243 switch (cmd) {
85244 case IPT_SO_GET_INFO:
85245- ret = get_info(sock_net(sk), user, len, 0);
85246+ ret = get_info(sock_net(sk), user, *len, 0);
85247 break;
85248
85249 case IPT_SO_GET_ENTRIES:
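As in arp_tables.c above and ip6_tables.c below, get_info() now takes the length by value: the caller dereferences *len exactly once, so userspace can no longer rewrite the length between the sizeof check and copy_to_user(). A userspace sketch of that snapshot idea (EXPECTED_SIZE is illustrative):

    #include <errno.h>
    #include <string.h>

    #define EXPECTED_SIZE 64u   /* plays sizeof(struct ipt_getinfo) */

    /* The same snapshot of len is used for both the check and the copy,
     * closing the check/use (TOCTOU) window of the *len version. */
    static int get_info_by_value(void *dst, const void *src, unsigned int len)
    {
        if (len != EXPECTED_SIZE)
            return -EINVAL;
        memcpy(dst, src, len);
        return 0;
    }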
85250diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
85251index dc454cc..5bb917f 100644
85252--- a/net/ipv4/ping.c
85253+++ b/net/ipv4/ping.c
85254@@ -844,7 +844,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
85255 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
85256 0, sock_i_ino(sp),
85257 atomic_read(&sp->sk_refcnt), sp,
85258- atomic_read(&sp->sk_drops), len);
85259+ atomic_read_unchecked(&sp->sk_drops), len);
85260 }
85261
85262 static int ping_seq_show(struct seq_file *seq, void *v)
85263diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
85264index 6f08991..55867ad 100644
85265--- a/net/ipv4/raw.c
85266+++ b/net/ipv4/raw.c
85267@@ -311,7 +311,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
85268 int raw_rcv(struct sock *sk, struct sk_buff *skb)
85269 {
85270 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
85271- atomic_inc(&sk->sk_drops);
85272+ atomic_inc_unchecked(&sk->sk_drops);
85273 kfree_skb(skb);
85274 return NET_RX_DROP;
85275 }
85276@@ -747,16 +747,20 @@ static int raw_init(struct sock *sk)
85277
85278 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
85279 {
85280+ struct icmp_filter filter;
85281+
85282 if (optlen > sizeof(struct icmp_filter))
85283 optlen = sizeof(struct icmp_filter);
85284- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
85285+ if (copy_from_user(&filter, optval, optlen))
85286 return -EFAULT;
85287+ raw_sk(sk)->filter = filter;
85288 return 0;
85289 }
85290
85291 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
85292 {
85293 int len, ret = -EFAULT;
85294+ struct icmp_filter filter;
85295
85296 if (get_user(len, optlen))
85297 goto out;
85298@@ -766,8 +770,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
85299 if (len > sizeof(struct icmp_filter))
85300 len = sizeof(struct icmp_filter);
85301 ret = -EFAULT;
85302- if (put_user(len, optlen) ||
85303- copy_to_user(optval, &raw_sk(sk)->filter, len))
85304+ filter = raw_sk(sk)->filter;
85305+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
85306 goto out;
85307 ret = 0;
85308 out: return ret;
85309@@ -998,7 +1002,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
85310 0, 0L, 0,
85311 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
85312 0, sock_i_ino(sp),
85313- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
85314+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
85315 }
85316
85317 static int raw_seq_show(struct seq_file *seq, void *v)
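Both ICMP-filter paths above now stage the filter in a stack copy: the set side copies from userspace into a bounce buffer and commits it whole, and the get side refuses any len larger than the filter before copying out. A sketch of the set side (the struct is a hypothetical stand-in):

    #include <string.h>

    struct icmp_filter_like { unsigned int data; };
    static struct icmp_filter_like sock_filter;  /* plays raw_sk(sk)->filter */

    /* Commit only after the whole copy succeeds, so a faulting copy can
     * never leave the socket's filter half-written. */
    static int set_filter(const void *src, size_t optlen)
    {
        struct icmp_filter_like tmp;
        if (optlen > sizeof(tmp))
            optlen = sizeof(tmp);
        memcpy(&tmp, src, optlen);  /* copy_from_user() in the kernel */
        sock_filter = tmp;          /* all-or-nothing update */
        return 0;
    }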
85318diff --git a/net/ipv4/route.c b/net/ipv4/route.c
85319index a0fcc47..32e2c89 100644
85320--- a/net/ipv4/route.c
85321+++ b/net/ipv4/route.c
85322@@ -2552,34 +2552,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
85323 .maxlen = sizeof(int),
85324 .mode = 0200,
85325 .proc_handler = ipv4_sysctl_rtcache_flush,
85326+ .extra1 = &init_net,
85327 },
85328 { },
85329 };
85330
85331 static __net_init int sysctl_route_net_init(struct net *net)
85332 {
85333- struct ctl_table *tbl;
85334+ ctl_table_no_const *tbl = NULL;
85335
85336- tbl = ipv4_route_flush_table;
85337 if (!net_eq(net, &init_net)) {
85338- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
85339+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
85340 if (tbl == NULL)
85341 goto err_dup;
85342
85343 /* Don't export sysctls to unprivileged users */
85344 if (net->user_ns != &init_user_ns)
85345 tbl[0].procname = NULL;
85346- }
85347- tbl[0].extra1 = net;
85348+ tbl[0].extra1 = net;
85349+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
85350+ } else
85351+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
85352
85353- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
85354 if (net->ipv4.route_hdr == NULL)
85355 goto err_reg;
85356 return 0;
85357
85358 err_reg:
85359- if (tbl != ipv4_route_flush_table)
85360- kfree(tbl);
85361+ kfree(tbl);
85362 err_dup:
85363 return -ENOMEM;
85364 }
85365@@ -2602,7 +2602,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
85366
85367 static __net_init int rt_genid_init(struct net *net)
85368 {
85369- atomic_set(&net->rt_genid, 0);
85370+ atomic_set_unchecked(&net->rt_genid, 0);
85371 get_random_bytes(&net->ipv4.dev_addr_genid,
85372 sizeof(net->ipv4.dev_addr_genid));
85373 return 0;
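Counters such as rt_genid, rid and sk_drops are switched to the *_unchecked atomic variants throughout these files; under PaX's REFCOUNT hardening a plain atomic_t traps on overflow, so counters that are allowed to wrap must opt out explicitly. A userspace analog, where unsigned arithmetic gives the same well-defined wraparound:

    /* Wrap-tolerant generation counter: unsigned overflow is defined,
     * mirroring what atomic_inc_unchecked() permits under REFCOUNT. */
    static unsigned int genid;

    static unsigned int bump_genid(void)
    {
        return ++genid;  /* may wrap to 0; readers only compare for equality */
    }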
85374diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
85375index d84400b..62e066e 100644
85376--- a/net/ipv4/sysctl_net_ipv4.c
85377+++ b/net/ipv4/sysctl_net_ipv4.c
85378@@ -54,7 +54,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
85379 {
85380 int ret;
85381 int range[2];
85382- ctl_table tmp = {
85383+ ctl_table_no_const tmp = {
85384 .data = &range,
85385 .maxlen = sizeof(range),
85386 .mode = table->mode,
85387@@ -107,7 +107,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
85388 int ret;
85389 gid_t urange[2];
85390 kgid_t low, high;
85391- ctl_table tmp = {
85392+ ctl_table_no_const tmp = {
85393 .data = &urange,
85394 .maxlen = sizeof(urange),
85395 .mode = table->mode,
85396@@ -138,7 +138,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
85397 void __user *buffer, size_t *lenp, loff_t *ppos)
85398 {
85399 char val[TCP_CA_NAME_MAX];
85400- ctl_table tbl = {
85401+ ctl_table_no_const tbl = {
85402 .data = val,
85403 .maxlen = TCP_CA_NAME_MAX,
85404 };
85405@@ -157,7 +157,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
85406 void __user *buffer, size_t *lenp,
85407 loff_t *ppos)
85408 {
85409- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
85410+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
85411 int ret;
85412
85413 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
85414@@ -174,7 +174,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
85415 void __user *buffer, size_t *lenp,
85416 loff_t *ppos)
85417 {
85418- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
85419+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
85420 int ret;
85421
85422 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
85423@@ -200,15 +200,17 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
85424 struct mem_cgroup *memcg;
85425 #endif
85426
85427- ctl_table tmp = {
85428+ ctl_table_no_const tmp = {
85429 .data = &vec,
85430 .maxlen = sizeof(vec),
85431 .mode = ctl->mode,
85432 };
85433
85434 if (!write) {
85435- ctl->data = &net->ipv4.sysctl_tcp_mem;
85436- return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
85437+ ctl_table_no_const tcp_mem = *ctl;
85438+
85439+ tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
85440+ return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
85441 }
85442
85443 ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
85444@@ -235,7 +237,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
85445 int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
85446 size_t *lenp, loff_t *ppos)
85447 {
85448- ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
85449+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
85450 struct tcp_fastopen_context *ctxt;
85451 int ret;
85452 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
85453@@ -476,7 +478,7 @@ static struct ctl_table ipv4_table[] = {
85454 },
85455 {
85456 .procname = "ip_local_reserved_ports",
85457- .data = NULL, /* initialized in sysctl_ipv4_init */
85458+ .data = sysctl_local_reserved_ports,
85459 .maxlen = 65536,
85460 .mode = 0644,
85461 .proc_handler = proc_do_large_bitmap,
85462@@ -860,11 +862,10 @@ static struct ctl_table ipv4_net_table[] = {
85463
85464 static __net_init int ipv4_sysctl_init_net(struct net *net)
85465 {
85466- struct ctl_table *table;
85467+ ctl_table_no_const *table = NULL;
85468
85469- table = ipv4_net_table;
85470 if (!net_eq(net, &init_net)) {
85471- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
85472+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
85473 if (table == NULL)
85474 goto err_alloc;
85475
85476@@ -897,15 +898,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
85477
85478 tcp_init_mem(net);
85479
85480- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
85481+ if (!net_eq(net, &init_net))
85482+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
85483+ else
85484+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
85485 if (net->ipv4.ipv4_hdr == NULL)
85486 goto err_reg;
85487
85488 return 0;
85489
85490 err_reg:
85491- if (!net_eq(net, &init_net))
85492- kfree(table);
85493+ kfree(table);
85494 err_alloc:
85495 return -ENOMEM;
85496 }
85497@@ -927,16 +930,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
85498 static __init int sysctl_ipv4_init(void)
85499 {
85500 struct ctl_table_header *hdr;
85501- struct ctl_table *i;
85502-
85503- for (i = ipv4_table; i->procname; i++) {
85504- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
85505- i->data = sysctl_local_reserved_ports;
85506- break;
85507- }
85508- }
85509- if (!i->procname)
85510- return -EINVAL;
85511
85512 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
85513 if (hdr == NULL)
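The reserved-ports bitmap changes from a boot-time kzalloc() to a static array (see the inet_connection_sock.c and af_inet.c hunks above), which also lets the ipv4_table entry point at it directly instead of being patched in at sysctl_ipv4_init() time. A sketch of the compile-time sizing, assuming port < 65536:

    /* One bit per 16-bit port, sized exactly as in the patch. */
    static unsigned long reserved_ports[65536 / 8 / sizeof(unsigned long)];

    #define BITS_PER_LONG_U (sizeof(unsigned long) * 8)

    static int port_is_reserved(unsigned int port)
    {
        return (int)((reserved_ports[port / BITS_PER_LONG_U]
                      >> (port % BITS_PER_LONG_U)) & 1UL);
    }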
85514diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
85515index ad70a96..50cb55b 100644
85516--- a/net/ipv4/tcp_input.c
85517+++ b/net/ipv4/tcp_input.c
85518@@ -4733,7 +4733,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
85519 * simplifies code)
85520 */
85521 static void
85522-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
85523+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
85524 struct sk_buff *head, struct sk_buff *tail,
85525 u32 start, u32 end)
85526 {
85527@@ -5850,6 +5850,7 @@ discard:
85528 tcp_paws_reject(&tp->rx_opt, 0))
85529 goto discard_and_undo;
85530
85531+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
85532 if (th->syn) {
85533 /* We see SYN without ACK. It is attempt of
85534 * simultaneous connect with crossed SYNs.
85535@@ -5900,6 +5901,7 @@ discard:
85536 goto discard;
85537 #endif
85538 }
85539+#endif
85540 /* "fifth, if neither of the SYN or RST bits is set then
85541 * drop the segment and return."
85542 */
85543@@ -5944,7 +5946,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
85544 goto discard;
85545
85546 if (th->syn) {
85547- if (th->fin)
85548+ if (th->fin || th->urg || th->psh)
85549 goto discard;
85550 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
85551 return 1;
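Two hardening changes in the tcp_input.c hunks above: CONFIG_GRKERNSEC_NO_SIMULT_CONNECT compiles out the SYN-without-ACK simultaneous-open path, and a listener now also drops a SYN that carries FIN, URG or PSH. The flag test, isolated as a tiny predicate (struct hypothetical):

    #include <stdbool.h>

    struct tcp_flags_like { bool syn, fin, urg, psh; };

    /* A connection request should be a bare SYN; SYN plus FIN/URG/PSH is
     * the kind of scanner fingerprinting probe the hunk above discards. */
    static bool acceptable_syn(const struct tcp_flags_like *th)
    {
        return th->syn && !th->fin && !th->urg && !th->psh;
    }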
85552diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
85553index eadb693..e8f7251 100644
85554--- a/net/ipv4/tcp_ipv4.c
85555+++ b/net/ipv4/tcp_ipv4.c
85556@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
85557 EXPORT_SYMBOL(sysctl_tcp_low_latency);
85558
85559
85560+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85561+extern int grsec_enable_blackhole;
85562+#endif
85563+
85564 #ifdef CONFIG_TCP_MD5SIG
85565 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
85566 __be32 daddr, __be32 saddr, const struct tcphdr *th);
85567@@ -1895,6 +1899,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
85568 return 0;
85569
85570 reset:
85571+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85572+ if (!grsec_enable_blackhole)
85573+#endif
85574 tcp_v4_send_reset(rsk, skb);
85575 discard:
85576 kfree_skb(skb);
85577@@ -1994,12 +2001,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
85578 TCP_SKB_CB(skb)->sacked = 0;
85579
85580 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
85581- if (!sk)
85582+ if (!sk) {
85583+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85584+ ret = 1;
85585+#endif
85586 goto no_tcp_socket;
85587-
85588+ }
85589 process:
85590- if (sk->sk_state == TCP_TIME_WAIT)
85591+ if (sk->sk_state == TCP_TIME_WAIT) {
85592+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85593+ ret = 2;
85594+#endif
85595 goto do_time_wait;
85596+ }
85597
85598 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
85599 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
85600@@ -2050,6 +2064,10 @@ no_tcp_socket:
85601 bad_packet:
85602 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
85603 } else {
85604+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85605+ if (!grsec_enable_blackhole || (ret == 1 &&
85606+ (skb->dev->flags & IFF_LOOPBACK)))
85607+#endif
85608 tcp_v4_send_reset(NULL, skb);
85609 }
85610
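With CONFIG_GRKERNSEC_BLACKHOLE, tcp_v4_rcv() above records why it bailed out (ret = 1 for no listening socket, ret = 2 for TIME_WAIT) and then suppresses the RST unless blackholing is disabled or a no-socket probe arrived via loopback, so remote scanners get silence instead of a reset. The decision, isolated (names hypothetical):

    #include <stdbool.h>

    /* Mirrors the send-reset condition in the hunk above. */
    static bool should_send_reset(bool blackhole, int why, bool from_loopback)
    {
        if (!blackhole)
            return true;                   /* feature off: RFC behaviour */
        return why == 1 && from_loopback;  /* only local probes get a RST */
    }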
85611diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
85612index f35f2df..ccb5ca6 100644
85613--- a/net/ipv4/tcp_minisocks.c
85614+++ b/net/ipv4/tcp_minisocks.c
85615@@ -27,6 +27,10 @@
85616 #include <net/inet_common.h>
85617 #include <net/xfrm.h>
85618
85619+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85620+extern int grsec_enable_blackhole;
85621+#endif
85622+
85623 int sysctl_tcp_syncookies __read_mostly = 1;
85624 EXPORT_SYMBOL(sysctl_tcp_syncookies);
85625
85626@@ -742,7 +746,10 @@ embryonic_reset:
85627 * avoid becoming vulnerable to outside attack aiming at
85628 * resetting legit local connections.
85629 */
85630- req->rsk_ops->send_reset(sk, skb);
85631+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85632+ if (!grsec_enable_blackhole)
85633+#endif
85634+ req->rsk_ops->send_reset(sk, skb);
85635 } else if (fastopen) { /* received a valid RST pkt */
85636 reqsk_fastopen_remove(sk, req, true);
85637 tcp_reset(sk);
85638diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
85639index 4526fe6..1a34e43 100644
85640--- a/net/ipv4/tcp_probe.c
85641+++ b/net/ipv4/tcp_probe.c
85642@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
85643 if (cnt + width >= len)
85644 break;
85645
85646- if (copy_to_user(buf + cnt, tbuf, width))
85647+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
85648 return -EFAULT;
85649 cnt += width;
85650 }
85651diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
85652index b78aac3..e18230b 100644
85653--- a/net/ipv4/tcp_timer.c
85654+++ b/net/ipv4/tcp_timer.c
85655@@ -22,6 +22,10 @@
85656 #include <linux/gfp.h>
85657 #include <net/tcp.h>
85658
85659+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85660+extern int grsec_lastack_retries;
85661+#endif
85662+
85663 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
85664 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
85665 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
85666@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
85667 }
85668 }
85669
85670+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85671+ if ((sk->sk_state == TCP_LAST_ACK) &&
85672+ (grsec_lastack_retries > 0) &&
85673+ (grsec_lastack_retries < retry_until))
85674+ retry_until = grsec_lastack_retries;
85675+#endif
85676+
85677 if (retransmits_timed_out(sk, retry_until,
85678 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
85679 /* Has it gone just too far? */
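The tcp_write_timeout() hunk above caps the retry budget of a socket stuck in LAST_ACK at grsec_lastack_retries when that sysctl is nonzero, so a peer that never ACKs the final FIN cannot pin the socket indefinitely. The clamp as a standalone helper:

    /* Clamp the LAST_ACK retransmit budget to a configured ceiling;
     * 0 means "no cap", matching how grsec_lastack_retries is tested. */
    static int effective_retries(int in_last_ack, int cap, int retry_until)
    {
        if (in_last_ack && cap > 0 && cap < retry_until)
            return cap;
        return retry_until;
    }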
85680diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
85681index 1f4d405..3524677 100644
85682--- a/net/ipv4/udp.c
85683+++ b/net/ipv4/udp.c
85684@@ -87,6 +87,7 @@
85685 #include <linux/types.h>
85686 #include <linux/fcntl.h>
85687 #include <linux/module.h>
85688+#include <linux/security.h>
85689 #include <linux/socket.h>
85690 #include <linux/sockios.h>
85691 #include <linux/igmp.h>
85692@@ -111,6 +112,10 @@
85693 #include <trace/events/skb.h>
85694 #include "udp_impl.h"
85695
85696+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85697+extern int grsec_enable_blackhole;
85698+#endif
85699+
85700 struct udp_table udp_table __read_mostly;
85701 EXPORT_SYMBOL(udp_table);
85702
85703@@ -569,6 +574,9 @@ found:
85704 return s;
85705 }
85706
85707+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
85708+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
85709+
85710 /*
85711 * This routine is called by the ICMP module when it gets some
85712 * sort of error condition. If err < 0 then the socket should
85713@@ -864,9 +872,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
85714 dport = usin->sin_port;
85715 if (dport == 0)
85716 return -EINVAL;
85717+
85718+ err = gr_search_udp_sendmsg(sk, usin);
85719+ if (err)
85720+ return err;
85721 } else {
85722 if (sk->sk_state != TCP_ESTABLISHED)
85723 return -EDESTADDRREQ;
85724+
85725+ err = gr_search_udp_sendmsg(sk, NULL);
85726+ if (err)
85727+ return err;
85728+
85729 daddr = inet->inet_daddr;
85730 dport = inet->inet_dport;
85731 /* Open fast path for connected socket.
85732@@ -1108,7 +1125,7 @@ static unsigned int first_packet_length(struct sock *sk)
85733 udp_lib_checksum_complete(skb)) {
85734 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
85735 IS_UDPLITE(sk));
85736- atomic_inc(&sk->sk_drops);
85737+ atomic_inc_unchecked(&sk->sk_drops);
85738 __skb_unlink(skb, rcvq);
85739 __skb_queue_tail(&list_kill, skb);
85740 }
85741@@ -1194,6 +1211,10 @@ try_again:
85742 if (!skb)
85743 goto out;
85744
85745+ err = gr_search_udp_recvmsg(sk, skb);
85746+ if (err)
85747+ goto out_free;
85748+
85749 ulen = skb->len - sizeof(struct udphdr);
85750 copied = len;
85751 if (copied > ulen)
85752@@ -1227,7 +1248,7 @@ try_again:
85753 if (unlikely(err)) {
85754 trace_kfree_skb(skb, udp_recvmsg);
85755 if (!peeked) {
85756- atomic_inc(&sk->sk_drops);
85757+ atomic_inc_unchecked(&sk->sk_drops);
85758 UDP_INC_STATS_USER(sock_net(sk),
85759 UDP_MIB_INERRORS, is_udplite);
85760 }
85761@@ -1510,7 +1531,7 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
85762
85763 drop:
85764 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
85765- atomic_inc(&sk->sk_drops);
85766+ atomic_inc_unchecked(&sk->sk_drops);
85767 kfree_skb(skb);
85768 return -1;
85769 }
85770@@ -1529,7 +1550,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
85771 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
85772
85773 if (!skb1) {
85774- atomic_inc(&sk->sk_drops);
85775+ atomic_inc_unchecked(&sk->sk_drops);
85776 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
85777 IS_UDPLITE(sk));
85778 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
85779@@ -1698,6 +1719,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
85780 goto csum_error;
85781
85782 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
85783+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
85784+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
85785+#endif
85786 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
85787
85788 /*
85789@@ -2120,7 +2144,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
85790 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
85791 0, sock_i_ino(sp),
85792 atomic_read(&sp->sk_refcnt), sp,
85793- atomic_read(&sp->sk_drops), len);
85794+ atomic_read_unchecked(&sp->sk_drops), len);
85795 }
85796
85797 int udp4_seq_show(struct seq_file *seq, void *v)
85798diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
85799index 1b5d8cb..ffb0833 100644
85800--- a/net/ipv6/addrconf.c
85801+++ b/net/ipv6/addrconf.c
85802@@ -2272,7 +2272,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
85803 p.iph.ihl = 5;
85804 p.iph.protocol = IPPROTO_IPV6;
85805 p.iph.ttl = 64;
85806- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
85807+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
85808
85809 if (ops->ndo_do_ioctl) {
85810 mm_segment_t oldfs = get_fs();
85811@@ -4388,7 +4388,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
85812 int *valp = ctl->data;
85813 int val = *valp;
85814 loff_t pos = *ppos;
85815- ctl_table lctl;
85816+ ctl_table_no_const lctl;
85817 int ret;
85818
85819 /*
85820@@ -4470,7 +4470,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
85821 int *valp = ctl->data;
85822 int val = *valp;
85823 loff_t pos = *ppos;
85824- ctl_table lctl;
85825+ ctl_table_no_const lctl;
85826 int ret;
85827
85828 /*
85829diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
85830index fff5bdd..15194fb 100644
85831--- a/net/ipv6/icmp.c
85832+++ b/net/ipv6/icmp.c
85833@@ -973,7 +973,7 @@ ctl_table ipv6_icmp_table_template[] = {
85834
85835 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
85836 {
85837- struct ctl_table *table;
85838+ ctl_table_no_const *table;
85839
85840 table = kmemdup(ipv6_icmp_table_template,
85841 sizeof(ipv6_icmp_table_template),
85842diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
85843index 131dd09..f7ed64f 100644
85844--- a/net/ipv6/ip6_gre.c
85845+++ b/net/ipv6/ip6_gre.c
85846@@ -73,7 +73,7 @@ struct ip6gre_net {
85847 struct net_device *fb_tunnel_dev;
85848 };
85849
85850-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
85851+static struct rtnl_link_ops ip6gre_link_ops;
85852 static int ip6gre_tunnel_init(struct net_device *dev);
85853 static void ip6gre_tunnel_setup(struct net_device *dev);
85854 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
85855@@ -1337,7 +1337,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
85856 }
85857
85858
85859-static struct inet6_protocol ip6gre_protocol __read_mostly = {
85860+static struct inet6_protocol ip6gre_protocol = {
85861 .handler = ip6gre_rcv,
85862 .err_handler = ip6gre_err,
85863 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
85864@@ -1671,7 +1671,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
85865 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
85866 };
85867
85868-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
85869+static struct rtnl_link_ops ip6gre_link_ops = {
85870 .kind = "ip6gre",
85871 .maxtype = IFLA_GRE_MAX,
85872 .policy = ip6gre_policy,
85873@@ -1684,7 +1684,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
85874 .fill_info = ip6gre_fill_info,
85875 };
85876
85877-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
85878+static struct rtnl_link_ops ip6gre_tap_ops = {
85879 .kind = "ip6gretap",
85880 .maxtype = IFLA_GRE_MAX,
85881 .policy = ip6gre_policy,
85882diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
85883index a14f28b..b4b8956 100644
85884--- a/net/ipv6/ip6_tunnel.c
85885+++ b/net/ipv6/ip6_tunnel.c
85886@@ -87,7 +87,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
85887
85888 static int ip6_tnl_dev_init(struct net_device *dev);
85889 static void ip6_tnl_dev_setup(struct net_device *dev);
85890-static struct rtnl_link_ops ip6_link_ops __read_mostly;
85891+static struct rtnl_link_ops ip6_link_ops;
85892
85893 static int ip6_tnl_net_id __read_mostly;
85894 struct ip6_tnl_net {
85895@@ -1686,7 +1686,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
85896 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
85897 };
85898
85899-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
85900+static struct rtnl_link_ops ip6_link_ops = {
85901 .kind = "ip6tnl",
85902 .maxtype = IFLA_IPTUN_MAX,
85903 .policy = ip6_tnl_policy,
85904diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
85905index d1e2e8e..51c19ae 100644
85906--- a/net/ipv6/ipv6_sockglue.c
85907+++ b/net/ipv6/ipv6_sockglue.c
85908@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
85909 if (sk->sk_type != SOCK_STREAM)
85910 return -ENOPROTOOPT;
85911
85912- msg.msg_control = optval;
85913+ msg.msg_control = (void __force_kernel *)optval;
85914 msg.msg_controllen = len;
85915 msg.msg_flags = flags;
85916
85917diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
85918index 125a90d..2a11f36 100644
85919--- a/net/ipv6/netfilter/ip6_tables.c
85920+++ b/net/ipv6/netfilter/ip6_tables.c
85921@@ -1076,14 +1076,14 @@ static int compat_table_info(const struct xt_table_info *info,
85922 #endif
85923
85924 static int get_info(struct net *net, void __user *user,
85925- const int *len, int compat)
85926+ int len, int compat)
85927 {
85928 char name[XT_TABLE_MAXNAMELEN];
85929 struct xt_table *t;
85930 int ret;
85931
85932- if (*len != sizeof(struct ip6t_getinfo)) {
85933- duprintf("length %u != %zu\n", *len,
85934+ if (len != sizeof(struct ip6t_getinfo)) {
85935+ duprintf("length %u != %zu\n", len,
85936 sizeof(struct ip6t_getinfo));
85937 return -EINVAL;
85938 }
85939@@ -1120,7 +1120,7 @@ static int get_info(struct net *net, void __user *user,
85940 info.size = private->size;
85941 strcpy(info.name, name);
85942
85943- if (copy_to_user(user, &info, *len) != 0)
85944+ if (copy_to_user(user, &info, len) != 0)
85945 ret = -EFAULT;
85946 else
85947 ret = 0;
85948@@ -1974,7 +1974,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
85949
85950 switch (cmd) {
85951 case IP6T_SO_GET_INFO:
85952- ret = get_info(sock_net(sk), user, len, 1);
85953+ ret = get_info(sock_net(sk), user, *len, 1);
85954 break;
85955 case IP6T_SO_GET_ENTRIES:
85956 ret = compat_get_entries(sock_net(sk), user, len);
85957@@ -2021,7 +2021,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
85958
85959 switch (cmd) {
85960 case IP6T_SO_GET_INFO:
85961- ret = get_info(sock_net(sk), user, len, 0);
85962+ ret = get_info(sock_net(sk), user, *len, 0);
85963 break;
85964
85965 case IP6T_SO_GET_ENTRIES:
85966diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
85967index 3dacecc..2939087 100644
85968--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
85969+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
85970@@ -87,12 +87,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
85971
85972 static int nf_ct_frag6_sysctl_register(struct net *net)
85973 {
85974- struct ctl_table *table;
85975+ ctl_table_no_const *table = NULL;
85976 struct ctl_table_header *hdr;
85977
85978- table = nf_ct_frag6_sysctl_table;
85979 if (!net_eq(net, &init_net)) {
85980- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
85981+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
85982 GFP_KERNEL);
85983 if (table == NULL)
85984 goto err_alloc;
85985@@ -100,9 +99,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
85986 table[0].data = &net->ipv6.frags.high_thresh;
85987 table[1].data = &net->ipv6.frags.low_thresh;
85988 table[2].data = &net->ipv6.frags.timeout;
85989- }
85990-
85991- hdr = register_net_sysctl(net, "net/netfilter", table);
85992+ hdr = register_net_sysctl(net, "net/netfilter", table);
85993+ } else
85994+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
85995 if (hdr == NULL)
85996 goto err_reg;
85997
85998@@ -110,8 +109,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
85999 return 0;
86000
86001 err_reg:
86002- if (!net_eq(net, &init_net))
86003- kfree(table);
86004+ kfree(table);
86005 err_alloc:
86006 return -ENOMEM;
86007 }
86008diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
86009index 70fa814..d70c28c 100644
86010--- a/net/ipv6/raw.c
86011+++ b/net/ipv6/raw.c
86012@@ -379,7 +379,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
86013 {
86014 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
86015 skb_checksum_complete(skb)) {
86016- atomic_inc(&sk->sk_drops);
86017+ atomic_inc_unchecked(&sk->sk_drops);
86018 kfree_skb(skb);
86019 return NET_RX_DROP;
86020 }
86021@@ -407,7 +407,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
86022 struct raw6_sock *rp = raw6_sk(sk);
86023
86024 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
86025- atomic_inc(&sk->sk_drops);
86026+ atomic_inc_unchecked(&sk->sk_drops);
86027 kfree_skb(skb);
86028 return NET_RX_DROP;
86029 }
86030@@ -431,7 +431,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
86031
86032 if (inet->hdrincl) {
86033 if (skb_checksum_complete(skb)) {
86034- atomic_inc(&sk->sk_drops);
86035+ atomic_inc_unchecked(&sk->sk_drops);
86036 kfree_skb(skb);
86037 return NET_RX_DROP;
86038 }
86039@@ -604,7 +604,7 @@ out:
86040 return err;
86041 }
86042
86043-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
86044+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
86045 struct flowi6 *fl6, struct dst_entry **dstp,
86046 unsigned int flags)
86047 {
86048@@ -916,12 +916,15 @@ do_confirm:
86049 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
86050 char __user *optval, int optlen)
86051 {
86052+ struct icmp6_filter filter;
86053+
86054 switch (optname) {
86055 case ICMPV6_FILTER:
86056 if (optlen > sizeof(struct icmp6_filter))
86057 optlen = sizeof(struct icmp6_filter);
86058- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
86059+ if (copy_from_user(&filter, optval, optlen))
86060 return -EFAULT;
86061+ raw6_sk(sk)->filter = filter;
86062 return 0;
86063 default:
86064 return -ENOPROTOOPT;
86065@@ -934,6 +937,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
86066 char __user *optval, int __user *optlen)
86067 {
86068 int len;
86069+ struct icmp6_filter filter;
86070
86071 switch (optname) {
86072 case ICMPV6_FILTER:
86073@@ -945,7 +949,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
86074 len = sizeof(struct icmp6_filter);
86075 if (put_user(len, optlen))
86076 return -EFAULT;
86077- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
86078+ filter = raw6_sk(sk)->filter;
86079+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
86080 return -EFAULT;
86081 return 0;
86082 default:
86083@@ -1253,7 +1258,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
86084 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
86085 0,
86086 sock_i_ino(sp),
86087- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
86088+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
86089 }
86090
86091 static int raw6_seq_show(struct seq_file *seq, void *v)
86092diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
86093index e5253ec..0410257 100644
86094--- a/net/ipv6/reassembly.c
86095+++ b/net/ipv6/reassembly.c
86096@@ -604,12 +604,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
86097
86098 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
86099 {
86100- struct ctl_table *table;
86101+ ctl_table_no_const *table = NULL;
86102 struct ctl_table_header *hdr;
86103
86104- table = ip6_frags_ns_ctl_table;
86105 if (!net_eq(net, &init_net)) {
86106- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
86107+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
86108 if (table == NULL)
86109 goto err_alloc;
86110
86111@@ -620,9 +619,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
86112 /* Don't export sysctls to unprivileged users */
86113 if (net->user_ns != &init_user_ns)
86114 table[0].procname = NULL;
86115- }
86116+ hdr = register_net_sysctl(net, "net/ipv6", table);
86117+ } else
86118+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
86119
86120- hdr = register_net_sysctl(net, "net/ipv6", table);
86121 if (hdr == NULL)
86122 goto err_reg;
86123
86124@@ -630,8 +630,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
86125 return 0;
86126
86127 err_reg:
86128- if (!net_eq(net, &init_net))
86129- kfree(table);
86130+ kfree(table);
86131 err_alloc:
86132 return -ENOMEM;
86133 }
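The reassembly.c hunk above rearranges sysctl registration so that the static template table is never written through a non-const pointer: only a non-init network namespace gets a mutable kmemdup() copy, the init namespace registers the read-only template directly, and the error path relies on kfree(NULL) being a no-op. A user-space sketch of the register-const-or-duplicate shape (register_table() and the entry layout are illustrative, not kernel APIs):

    #include <stdlib.h>
    #include <string.h>

    struct entry { const char *procname; int value; };
    static const struct entry template[2] = { { "high_thresh", 4 }, { NULL, 0 } };

    int register_for_ns(int is_init_ns)
    {
        struct entry *copy = NULL;

        if (!is_init_ns) {
            copy = malloc(sizeof(template));
            if (!copy)
                return -1;
            memcpy(copy, template, sizeof(template));
            copy[0].value = 8;          /* per-namespace adjustment */
            /* register_table(copy): ownership passes to the registry */
        } else {
            /* register_table(template): the template stays read-only */
        }
        return 0;
    }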
86134diff --git a/net/ipv6/route.c b/net/ipv6/route.c
86135index 6f9f7b6..2306d63 100644
86136--- a/net/ipv6/route.c
86137+++ b/net/ipv6/route.c
86138@@ -2965,7 +2965,7 @@ ctl_table ipv6_route_table_template[] = {
86139
86140 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
86141 {
86142- struct ctl_table *table;
86143+ ctl_table_no_const *table;
86144
86145 table = kmemdup(ipv6_route_table_template,
86146 sizeof(ipv6_route_table_template),
86147diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
86148index cfba99b..20ca511 100644
86149--- a/net/ipv6/sit.c
86150+++ b/net/ipv6/sit.c
86151@@ -72,7 +72,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
86152 static int ipip6_tunnel_init(struct net_device *dev);
86153 static void ipip6_tunnel_setup(struct net_device *dev);
86154 static void ipip6_dev_free(struct net_device *dev);
86155-static struct rtnl_link_ops sit_link_ops __read_mostly;
86156+static struct rtnl_link_ops sit_link_ops;
86157
86158 static int sit_net_id __read_mostly;
86159 struct sit_net {
86160@@ -1463,7 +1463,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
86161 #endif
86162 };
86163
86164-static struct rtnl_link_ops sit_link_ops __read_mostly = {
86165+static struct rtnl_link_ops sit_link_ops = {
86166 .kind = "sit",
86167 .maxtype = IFLA_IPTUN_MAX,
86168 .policy = ipip6_policy,
86169diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
86170index e85c48b..b8268d3 100644
86171--- a/net/ipv6/sysctl_net_ipv6.c
86172+++ b/net/ipv6/sysctl_net_ipv6.c
86173@@ -40,7 +40,7 @@ static ctl_table ipv6_rotable[] = {
86174
86175 static int __net_init ipv6_sysctl_net_init(struct net *net)
86176 {
86177- struct ctl_table *ipv6_table;
86178+ ctl_table_no_const *ipv6_table;
86179 struct ctl_table *ipv6_route_table;
86180 struct ctl_table *ipv6_icmp_table;
86181 int err;
86182diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
86183index 4f435371..5de9da7 100644
86184--- a/net/ipv6/tcp_ipv6.c
86185+++ b/net/ipv6/tcp_ipv6.c
86186@@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
86187 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
86188 }
86189
86190+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86191+extern int grsec_enable_blackhole;
86192+#endif
86193+
86194 static void tcp_v6_hash(struct sock *sk)
86195 {
86196 if (sk->sk_state != TCP_CLOSE) {
86197@@ -1433,6 +1437,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
86198 return 0;
86199
86200 reset:
86201+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86202+ if (!grsec_enable_blackhole)
86203+#endif
86204 tcp_v6_send_reset(sk, skb);
86205 discard:
86206 if (opt_skb)
86207@@ -1514,12 +1521,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
86208 TCP_SKB_CB(skb)->sacked = 0;
86209
86210 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
86211- if (!sk)
86212+ if (!sk) {
86213+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86214+ ret = 1;
86215+#endif
86216 goto no_tcp_socket;
86217+ }
86218
86219 process:
86220- if (sk->sk_state == TCP_TIME_WAIT)
86221+ if (sk->sk_state == TCP_TIME_WAIT) {
86222+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86223+ ret = 2;
86224+#endif
86225 goto do_time_wait;
86226+ }
86227
86228 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
86229 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
86230@@ -1568,6 +1583,10 @@ no_tcp_socket:
86231 bad_packet:
86232 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
86233 } else {
86234+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86235+ if (!grsec_enable_blackhole || (ret == 1 &&
86236+ (skb->dev->flags & IFF_LOOPBACK)))
86237+#endif
86238 tcp_v6_send_reset(NULL, skb);
86239 }
86240
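The tcp_ipv6.c hunks wire the GRKERNSEC_BLACKHOLE sysctl into the IPv6 RST paths: ret records why the lookup bailed out (1 = no socket, 2 = TIME_WAIT), and a reset is only transmitted when blackholing is off, or when the unmatched packet arrived on a loopback device. The added condition, reduced to a self-contained predicate (names illustrative):

    #include <stdbool.h>

    /* mirrors: !grsec_enable_blackhole ||
     *          (ret == 1 && (skb->dev->flags & IFF_LOOPBACK)) */
    bool should_send_reset(bool blackhole_enabled, int ret, bool from_loopback)
    {
        return !blackhole_enabled || (ret == 1 && from_loopback);
    }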
86241diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
86242index fb08329..2d6919e 100644
86243--- a/net/ipv6/udp.c
86244+++ b/net/ipv6/udp.c
86245@@ -51,6 +51,10 @@
86246 #include <trace/events/skb.h>
86247 #include "udp_impl.h"
86248
86249+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86250+extern int grsec_enable_blackhole;
86251+#endif
86252+
86253 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
86254 {
86255 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
86256@@ -395,7 +399,7 @@ try_again:
86257 if (unlikely(err)) {
86258 trace_kfree_skb(skb, udpv6_recvmsg);
86259 if (!peeked) {
86260- atomic_inc(&sk->sk_drops);
86261+ atomic_inc_unchecked(&sk->sk_drops);
86262 if (is_udp4)
86263 UDP_INC_STATS_USER(sock_net(sk),
86264 UDP_MIB_INERRORS,
86265@@ -633,7 +637,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
86266 return rc;
86267 drop:
86268 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
86269- atomic_inc(&sk->sk_drops);
86270+ atomic_inc_unchecked(&sk->sk_drops);
86271 kfree_skb(skb);
86272 return -1;
86273 }
86274@@ -691,7 +695,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
86275 if (likely(skb1 == NULL))
86276 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
86277 if (!skb1) {
86278- atomic_inc(&sk->sk_drops);
86279+ atomic_inc_unchecked(&sk->sk_drops);
86280 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
86281 IS_UDPLITE(sk));
86282 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
86283@@ -862,6 +866,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
86284 goto discard;
86285
86286 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
86287+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
86288+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
86289+#endif
86290 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
86291
86292 kfree_skb(skb);
86293@@ -1379,7 +1386,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
86294 0,
86295 sock_i_ino(sp),
86296 atomic_read(&sp->sk_refcnt), sp,
86297- atomic_read(&sp->sk_drops));
86298+ atomic_read_unchecked(&sp->sk_drops));
86299 }
86300
86301 int udp6_seq_show(struct seq_file *seq, void *v)
86302diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
86303index a68c88c..d55b0c5 100644
86304--- a/net/irda/ircomm/ircomm_tty.c
86305+++ b/net/irda/ircomm/ircomm_tty.c
86306@@ -312,12 +312,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
86307 add_wait_queue(&port->open_wait, &wait);
86308
86309 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
86310- __FILE__, __LINE__, tty->driver->name, port->count);
86311+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
86312
86313 spin_lock_irqsave(&port->lock, flags);
86314 if (!tty_hung_up_p(filp)) {
86315 extra_count = 1;
86316- port->count--;
86317+ atomic_dec(&port->count);
86318 }
86319 spin_unlock_irqrestore(&port->lock, flags);
86320 port->blocked_open++;
86321@@ -353,7 +353,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
86322 }
86323
86324 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
86325- __FILE__, __LINE__, tty->driver->name, port->count);
86326+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
86327
86328 schedule();
86329 }
86330@@ -364,13 +364,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
86331 if (extra_count) {
86332 /* ++ is not atomic, so this should be protected - Jean II */
86333 spin_lock_irqsave(&port->lock, flags);
86334- port->count++;
86335+ atomic_inc(&port->count);
86336 spin_unlock_irqrestore(&port->lock, flags);
86337 }
86338 port->blocked_open--;
86339
86340 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
86341- __FILE__, __LINE__, tty->driver->name, port->count);
86342+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
86343
86344 if (!retval)
86345 port->flags |= ASYNC_NORMAL_ACTIVE;
86346@@ -444,12 +444,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
86347
86348 /* ++ is not atomic, so this should be protected - Jean II */
86349 spin_lock_irqsave(&self->port.lock, flags);
86350- self->port.count++;
86351+ atomic_inc(&self->port.count);
86352 spin_unlock_irqrestore(&self->port.lock, flags);
86353 tty_port_tty_set(&self->port, tty);
86354
86355 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
86356- self->line, self->port.count);
86357+ self->line, atomic_read(&self->port.count));
86358
86359 /* Not really used by us, but lets do it anyway */
86360 tty->low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
86361@@ -986,7 +986,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
86362 tty_kref_put(port->tty);
86363 }
86364 port->tty = NULL;
86365- port->count = 0;
86366+ atomic_set(&port->count, 0);
86367 spin_unlock_irqrestore(&port->lock, flags);
86368
86369 wake_up_interruptible(&port->open_wait);
86370@@ -1343,7 +1343,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
86371 seq_putc(m, '\n');
86372
86373 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
86374- seq_printf(m, "Open count: %d\n", self->port.count);
86375+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
86376 seq_printf(m, "Max data size: %d\n", self->max_data_size);
86377 seq_printf(m, "Max header size: %d\n", self->max_header_size);
86378
86379diff --git a/net/irda/iriap.c b/net/irda/iriap.c
86380index e71e85b..29340a9 100644
86381--- a/net/irda/iriap.c
86382+++ b/net/irda/iriap.c
86383@@ -495,8 +495,11 @@ static void iriap_getvaluebyclass_confirm(struct iriap_cb *self,
86384 /* case CS_ISO_8859_9: */
86385 /* case CS_UNICODE: */
86386 default:
86387- IRDA_DEBUG(0, "%s(), charset %s, not supported\n",
86388- __func__, ias_charset_types[charset]);
86389+ IRDA_DEBUG(0, "%s(), charset [%d] %s, not supported\n",
86390+ __func__, charset,
86391+ charset < ARRAY_SIZE(ias_charset_types) ?
86392+ ias_charset_types[charset] :
86393+ "(unknown)");
86394
86395 /* Aborting, close connection! */
86396 iriap_disconnect_request(self);
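The iriap.c hunk guards a debug printout against indexing past the end of the charset name table when an unknown charset value arrives off the wire. The same bounds-checked lookup as a standalone sketch (table contents illustrative):

    #include <stdio.h>

    static const char *const charset_names[] = { "ASCII", "ISO-8859-1" };
    #define N_NAMES (sizeof(charset_names) / sizeof(charset_names[0]))

    void log_charset(unsigned int charset)
    {
        printf("charset [%u] %s, not supported\n", charset,
               charset < N_NAMES ? charset_names[charset] : "(unknown)");
    }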
86397diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
86398index cd6f7a9..e63fe89 100644
86399--- a/net/iucv/af_iucv.c
86400+++ b/net/iucv/af_iucv.c
86401@@ -782,10 +782,10 @@ static int iucv_sock_autobind(struct sock *sk)
86402
86403 write_lock_bh(&iucv_sk_list.lock);
86404
86405- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
86406+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
86407 while (__iucv_get_sock_by_name(name)) {
86408 sprintf(name, "%08x",
86409- atomic_inc_return(&iucv_sk_list.autobind_name));
86410+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
86411 }
86412
86413 write_unlock_bh(&iucv_sk_list.lock);
86414diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
86415index df08250..02021fe 100644
86416--- a/net/iucv/iucv.c
86417+++ b/net/iucv/iucv.c
86418@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
86419 return NOTIFY_OK;
86420 }
86421
86422-static struct notifier_block __refdata iucv_cpu_notifier = {
86423+static struct notifier_block iucv_cpu_notifier = {
86424 .notifier_call = iucv_cpu_notify,
86425 };
86426
86427diff --git a/net/key/af_key.c b/net/key/af_key.c
86428index 5b426a6..970032b 100644
86429--- a/net/key/af_key.c
86430+++ b/net/key/af_key.c
86431@@ -3019,10 +3019,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
86432 static u32 get_acqseq(void)
86433 {
86434 u32 res;
86435- static atomic_t acqseq;
86436+ static atomic_unchecked_t acqseq;
86437
86438 do {
86439- res = atomic_inc_return(&acqseq);
86440+ res = atomic_inc_return_unchecked(&acqseq);
86441 } while (!res);
86442 return res;
86443 }
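get_acqseq() above is a counter whose wraparound is intentional, which is exactly the case the *_unchecked atomic variants exist for: it only has to produce a non-zero value, never to count references. A user-space analogy using C11 atomics, where atomic_fetch_add() + 1 plays the role of atomic_inc_return_unchecked():

    #include <stdatomic.h>
    #include <stdint.h>

    static atomic_uint acqseq;

    uint32_t get_acqseq(void)
    {
        uint32_t res;

        do {
            res = atomic_fetch_add(&acqseq, 1) + 1; /* increment and return */
        } while (!res);                             /* skip 0 when wrapping */
        return res;
    }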
86444diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
86445index 716605c..044e9e1 100644
86446--- a/net/l2tp/l2tp_ppp.c
86447+++ b/net/l2tp/l2tp_ppp.c
86448@@ -355,6 +355,7 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
86449 l2tp_xmit_skb(session, skb, session->hdr_len);
86450
86451 sock_put(ps->tunnel_sock);
86452+ sock_put(sk);
86453
86454 return error;
86455
86456diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
86457index 0479c64..d031db6 100644
86458--- a/net/mac80211/cfg.c
86459+++ b/net/mac80211/cfg.c
86460@@ -790,7 +790,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
86461 ret = ieee80211_vif_use_channel(sdata, chandef,
86462 IEEE80211_CHANCTX_EXCLUSIVE);
86463 }
86464- } else if (local->open_count == local->monitors) {
86465+ } else if (local_read(&local->open_count) == local->monitors) {
86466 local->_oper_channel = chandef->chan;
86467 local->_oper_channel_type = cfg80211_get_chandef_type(chandef);
86468 ieee80211_hw_config(local, 0);
86469@@ -2716,7 +2716,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
86470 else
86471 local->probe_req_reg--;
86472
86473- if (!local->open_count)
86474+ if (!local_read(&local->open_count))
86475 break;
86476
86477 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
86478diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
86479index 2ed065c..948177f 100644
86480--- a/net/mac80211/ieee80211_i.h
86481+++ b/net/mac80211/ieee80211_i.h
86482@@ -28,6 +28,7 @@
86483 #include <net/ieee80211_radiotap.h>
86484 #include <net/cfg80211.h>
86485 #include <net/mac80211.h>
86486+#include <asm/local.h>
86487 #include "key.h"
86488 #include "sta_info.h"
86489 #include "debug.h"
86490@@ -909,7 +910,7 @@ struct ieee80211_local {
86491 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
86492 spinlock_t queue_stop_reason_lock;
86493
86494- int open_count;
86495+ local_t open_count;
86496 int monitors, cooked_mntrs;
86497 /* number of interfaces with corresponding FIF_ flags */
86498 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
86499diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
86500index 8be854e..ad72a69 100644
86501--- a/net/mac80211/iface.c
86502+++ b/net/mac80211/iface.c
86503@@ -546,7 +546,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
86504 break;
86505 }
86506
86507- if (local->open_count == 0) {
86508+ if (local_read(&local->open_count) == 0) {
86509 res = drv_start(local);
86510 if (res)
86511 goto err_del_bss;
86512@@ -591,7 +591,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
86513 break;
86514 }
86515
86516- if (local->monitors == 0 && local->open_count == 0) {
86517+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
86518 res = ieee80211_add_virtual_monitor(local);
86519 if (res)
86520 goto err_stop;
86521@@ -699,7 +699,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
86522 mutex_unlock(&local->mtx);
86523
86524 if (coming_up)
86525- local->open_count++;
86526+ local_inc(&local->open_count);
86527
86528 if (hw_reconf_flags)
86529 ieee80211_hw_config(local, hw_reconf_flags);
86530@@ -713,7 +713,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
86531 err_del_interface:
86532 drv_remove_interface(local, sdata);
86533 err_stop:
86534- if (!local->open_count)
86535+ if (!local_read(&local->open_count))
86536 drv_stop(local);
86537 err_del_bss:
86538 sdata->bss = NULL;
86539@@ -827,7 +827,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
86540 }
86541
86542 if (going_down)
86543- local->open_count--;
86544+ local_dec(&local->open_count);
86545
86546 switch (sdata->vif.type) {
86547 case NL80211_IFTYPE_AP_VLAN:
86548@@ -884,7 +884,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
86549
86550 ieee80211_recalc_ps(local, -1);
86551
86552- if (local->open_count == 0) {
86553+ if (local_read(&local->open_count) == 0) {
86554 if (local->ops->napi_poll)
86555 napi_disable(&local->napi);
86556 ieee80211_clear_tx_pending(local);
86557@@ -910,7 +910,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
86558 }
86559 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
86560
86561- if (local->monitors == local->open_count && local->monitors > 0)
86562+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
86563 ieee80211_add_virtual_monitor(local);
86564 }
86565
86566diff --git a/net/mac80211/main.c b/net/mac80211/main.c
86567index 1b087ff..bf600e9 100644
86568--- a/net/mac80211/main.c
86569+++ b/net/mac80211/main.c
86570@@ -181,7 +181,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
86571 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
86572 IEEE80211_CONF_CHANGE_POWER);
86573
86574- if (changed && local->open_count) {
86575+ if (changed && local_read(&local->open_count)) {
86576 ret = drv_config(local, changed);
86577 /*
86578 * Goal:
86579diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
86580index 79a48f3..5e185c9 100644
86581--- a/net/mac80211/pm.c
86582+++ b/net/mac80211/pm.c
86583@@ -35,7 +35,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
86584 struct sta_info *sta;
86585 struct ieee80211_chanctx *ctx;
86586
86587- if (!local->open_count)
86588+ if (!local_read(&local->open_count))
86589 goto suspend;
86590
86591 ieee80211_scan_cancel(local);
86592@@ -73,7 +73,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
86593 cancel_work_sync(&local->dynamic_ps_enable_work);
86594 del_timer_sync(&local->dynamic_ps_timer);
86595
86596- local->wowlan = wowlan && local->open_count;
86597+ local->wowlan = wowlan && local_read(&local->open_count);
86598 if (local->wowlan) {
86599 int err = drv_suspend(local, wowlan);
86600 if (err < 0) {
86601@@ -187,7 +187,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
86602 mutex_unlock(&local->chanctx_mtx);
86603
86604 /* stop hardware - this must stop RX */
86605- if (local->open_count)
86606+ if (local_read(&local->open_count))
86607 ieee80211_stop_device(local);
86608
86609 suspend:
86610diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
86611index dd88381..eef4dd6 100644
86612--- a/net/mac80211/rate.c
86613+++ b/net/mac80211/rate.c
86614@@ -493,7 +493,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
86615
86616 ASSERT_RTNL();
86617
86618- if (local->open_count)
86619+ if (local_read(&local->open_count))
86620 return -EBUSY;
86621
86622 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
86623diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
86624index c97a065..ff61928 100644
86625--- a/net/mac80211/rc80211_pid_debugfs.c
86626+++ b/net/mac80211/rc80211_pid_debugfs.c
86627@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
86628
86629 spin_unlock_irqrestore(&events->lock, status);
86630
86631- if (copy_to_user(buf, pb, p))
86632+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
86633 return -EFAULT;
86634
86635 return p;
86636diff --git a/net/mac80211/util.c b/net/mac80211/util.c
86637index f11e8c5..08d0013 100644
86638--- a/net/mac80211/util.c
86639+++ b/net/mac80211/util.c
86640@@ -1380,7 +1380,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
86641 }
86642 #endif
86643 /* everything else happens only if HW was up & running */
86644- if (!local->open_count)
86645+ if (!local_read(&local->open_count))
86646 goto wake_up;
86647
86648 /*
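Across cfg.c, iface.c, main.c, pm.c, rate.c and util.c above, every access to ieee80211_local.open_count is rewritten from a plain int to the local_t accessors (local_read/local_inc/local_dec) declared via the new <asm/local.h> include in ieee80211_i.h. The shape of the conversion, shown with C11 atomics as a user-space stand-in for local_t:

    #include <stdatomic.h>

    struct local_state { atomic_long open_count; };

    long open_count_read(struct local_state *l) { return atomic_load(&l->open_count); }
    void open_count_inc(struct local_state *l)  { atomic_fetch_add(&l->open_count, 1); }
    void open_count_dec(struct local_state *l)  { atomic_fetch_sub(&l->open_count, 1); }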
86649diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
86650index 49e96df..63a51c3 100644
86651--- a/net/netfilter/Kconfig
86652+++ b/net/netfilter/Kconfig
86653@@ -936,6 +936,16 @@ config NETFILTER_XT_MATCH_ESP
86654
86655 To compile it as a module, choose M here. If unsure, say N.
86656
86657+config NETFILTER_XT_MATCH_GRADM
86658+ tristate '"gradm" match support'
86659+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
86660+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
86661+ ---help---
86662+ The gradm match allows matching on whether grsecurity RBAC is
86663+ enabled. It is useful when iptables rules are applied early during
86664+ boot to prevent connections to the machine (except from a trusted
86665+ host) while the RBAC system is disabled.
86666+
86667 config NETFILTER_XT_MATCH_HASHLIMIT
86668 tristate '"hashlimit" match support'
86669 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
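The help text above describes the intended deployment. Assuming the matching userspace extension is installed, rules of roughly the following shape would implement it; the --disabled option name is inferred from the description for illustration, not verified here:

    # allow a trusted host, drop everything else until RBAC is enabled
    iptables -A INPUT -s 192.168.1.10 -j ACCEPT
    iptables -A INPUT -m gradm --disabled -j DROP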
86670diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
86671index 3259697..54d5393 100644
86672--- a/net/netfilter/Makefile
86673+++ b/net/netfilter/Makefile
86674@@ -109,6 +109,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
86675 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
86676 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
86677 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
86678+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
86679 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
86680 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
86681 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
86682diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
86683index 6d6d8f2..a676749 100644
86684--- a/net/netfilter/ipset/ip_set_core.c
86685+++ b/net/netfilter/ipset/ip_set_core.c
86686@@ -1800,7 +1800,7 @@ done:
86687 return ret;
86688 }
86689
86690-static struct nf_sockopt_ops so_set __read_mostly = {
86691+static struct nf_sockopt_ops so_set = {
86692 .pf = PF_INET,
86693 .get_optmin = SO_IP_SET,
86694 .get_optmax = SO_IP_SET + 1,
86695diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
86696index 30e764a..c3b6a9d 100644
86697--- a/net/netfilter/ipvs/ip_vs_conn.c
86698+++ b/net/netfilter/ipvs/ip_vs_conn.c
86699@@ -554,7 +554,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
86700 /* Increase the refcnt counter of the dest */
86701 atomic_inc(&dest->refcnt);
86702
86703- conn_flags = atomic_read(&dest->conn_flags);
86704+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
86705 if (cp->protocol != IPPROTO_UDP)
86706 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
86707 flags = cp->flags;
86708@@ -899,7 +899,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
86709 atomic_set(&cp->refcnt, 1);
86710
86711 atomic_set(&cp->n_control, 0);
86712- atomic_set(&cp->in_pkts, 0);
86713+ atomic_set_unchecked(&cp->in_pkts, 0);
86714
86715 atomic_inc(&ipvs->conn_count);
86716 if (flags & IP_VS_CONN_F_NO_CPORT)
86717@@ -1180,7 +1180,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
86718
86719 /* Don't drop the entry if its number of incoming packets is not
86720 located in [0, 8] */
86721- i = atomic_read(&cp->in_pkts);
86722+ i = atomic_read_unchecked(&cp->in_pkts);
86723 if (i > 8 || i < 0) return 0;
86724
86725 if (!todrop_rate[i]) return 0;
86726diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
86727index 47edf5a..235b07d 100644
86728--- a/net/netfilter/ipvs/ip_vs_core.c
86729+++ b/net/netfilter/ipvs/ip_vs_core.c
86730@@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
86731 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
86732 /* do not touch skb anymore */
86733
86734- atomic_inc(&cp->in_pkts);
86735+ atomic_inc_unchecked(&cp->in_pkts);
86736 ip_vs_conn_put(cp);
86737 return ret;
86738 }
86739@@ -1691,7 +1691,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
86740 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
86741 pkts = sysctl_sync_threshold(ipvs);
86742 else
86743- pkts = atomic_add_return(1, &cp->in_pkts);
86744+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
86745
86746 if (ipvs->sync_state & IP_VS_STATE_MASTER)
86747 ip_vs_sync_conn(net, cp, pkts);
86748diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
86749index ec664cb..7f34a77 100644
86750--- a/net/netfilter/ipvs/ip_vs_ctl.c
86751+++ b/net/netfilter/ipvs/ip_vs_ctl.c
86752@@ -787,7 +787,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
86753 ip_vs_rs_hash(ipvs, dest);
86754 write_unlock_bh(&ipvs->rs_lock);
86755 }
86756- atomic_set(&dest->conn_flags, conn_flags);
86757+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
86758
86759 /* bind the service */
86760 if (!dest->svc) {
86761@@ -1688,7 +1688,7 @@ proc_do_sync_ports(ctl_table *table, int write,
86762 * align with netns init in ip_vs_control_net_init()
86763 */
86764
86765-static struct ctl_table vs_vars[] = {
86766+static ctl_table_no_const vs_vars[] __read_only = {
86767 {
86768 .procname = "amemthresh",
86769 .maxlen = sizeof(int),
86770@@ -2081,7 +2081,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
86771 " %-7s %-6d %-10d %-10d\n",
86772 &dest->addr.in6,
86773 ntohs(dest->port),
86774- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
86775+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
86776 atomic_read(&dest->weight),
86777 atomic_read(&dest->activeconns),
86778 atomic_read(&dest->inactconns));
86779@@ -2092,7 +2092,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
86780 "%-7s %-6d %-10d %-10d\n",
86781 ntohl(dest->addr.ip),
86782 ntohs(dest->port),
86783- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
86784+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
86785 atomic_read(&dest->weight),
86786 atomic_read(&dest->activeconns),
86787 atomic_read(&dest->inactconns));
86788@@ -2562,7 +2562,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
86789
86790 entry.addr = dest->addr.ip;
86791 entry.port = dest->port;
86792- entry.conn_flags = atomic_read(&dest->conn_flags);
86793+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
86794 entry.weight = atomic_read(&dest->weight);
86795 entry.u_threshold = dest->u_threshold;
86796 entry.l_threshold = dest->l_threshold;
86797@@ -3098,7 +3098,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
86798 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
86799 nla_put_u16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
86800 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
86801- (atomic_read(&dest->conn_flags) &
86802+ (atomic_read_unchecked(&dest->conn_flags) &
86803 IP_VS_CONN_F_FWD_MASK)) ||
86804 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
86805 atomic_read(&dest->weight)) ||
86806@@ -3688,7 +3688,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
86807 {
86808 int idx;
86809 struct netns_ipvs *ipvs = net_ipvs(net);
86810- struct ctl_table *tbl;
86811+ ctl_table_no_const *tbl;
86812
86813 atomic_set(&ipvs->dropentry, 0);
86814 spin_lock_init(&ipvs->dropentry_lock);
86815diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
86816index fdd89b9..bd96aa9 100644
86817--- a/net/netfilter/ipvs/ip_vs_lblc.c
86818+++ b/net/netfilter/ipvs/ip_vs_lblc.c
86819@@ -115,7 +115,7 @@ struct ip_vs_lblc_table {
86820 * IPVS LBLC sysctl table
86821 */
86822 #ifdef CONFIG_SYSCTL
86823-static ctl_table vs_vars_table[] = {
86824+static ctl_table_no_const vs_vars_table[] __read_only = {
86825 {
86826 .procname = "lblc_expiration",
86827 .data = NULL,
86828diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
86829index c03b6a3..8ce3681 100644
86830--- a/net/netfilter/ipvs/ip_vs_lblcr.c
86831+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
86832@@ -288,7 +288,7 @@ struct ip_vs_lblcr_table {
86833 * IPVS LBLCR sysctl table
86834 */
86835
86836-static ctl_table vs_vars_table[] = {
86837+static ctl_table_no_const vs_vars_table[] __read_only = {
86838 {
86839 .procname = "lblcr_expiration",
86840 .data = NULL,
86841diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
86842index 44fd10c..2a163b3 100644
86843--- a/net/netfilter/ipvs/ip_vs_sync.c
86844+++ b/net/netfilter/ipvs/ip_vs_sync.c
86845@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
86846 cp = cp->control;
86847 if (cp) {
86848 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
86849- pkts = atomic_add_return(1, &cp->in_pkts);
86850+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
86851 else
86852 pkts = sysctl_sync_threshold(ipvs);
86853 ip_vs_sync_conn(net, cp->control, pkts);
86854@@ -758,7 +758,7 @@ control:
86855 if (!cp)
86856 return;
86857 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
86858- pkts = atomic_add_return(1, &cp->in_pkts);
86859+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
86860 else
86861 pkts = sysctl_sync_threshold(ipvs);
86862 goto sloop;
86863@@ -885,7 +885,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
86864
86865 if (opt)
86866 memcpy(&cp->in_seq, opt, sizeof(*opt));
86867- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
86868+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
86869 cp->state = state;
86870 cp->old_state = cp->state;
86871 /*
86872diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
86873index ee6b7a9..f9a89f6 100644
86874--- a/net/netfilter/ipvs/ip_vs_xmit.c
86875+++ b/net/netfilter/ipvs/ip_vs_xmit.c
86876@@ -1210,7 +1210,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
86877 else
86878 rc = NF_ACCEPT;
86879 /* do not touch skb anymore */
86880- atomic_inc(&cp->in_pkts);
86881+ atomic_inc_unchecked(&cp->in_pkts);
86882 goto out;
86883 }
86884
86885@@ -1332,7 +1332,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
86886 else
86887 rc = NF_ACCEPT;
86888 /* do not touch skb anymore */
86889- atomic_inc(&cp->in_pkts);
86890+ atomic_inc_unchecked(&cp->in_pkts);
86891 goto out;
86892 }
86893
86894diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
86895index 7df424e..a527b02 100644
86896--- a/net/netfilter/nf_conntrack_acct.c
86897+++ b/net/netfilter/nf_conntrack_acct.c
86898@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
86899 #ifdef CONFIG_SYSCTL
86900 static int nf_conntrack_acct_init_sysctl(struct net *net)
86901 {
86902- struct ctl_table *table;
86903+ ctl_table_no_const *table;
86904
86905 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
86906 GFP_KERNEL);
86907diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
86908index e4a0c4f..c263f28 100644
86909--- a/net/netfilter/nf_conntrack_core.c
86910+++ b/net/netfilter/nf_conntrack_core.c
86911@@ -1529,6 +1529,10 @@ err_extend:
86912 #define DYING_NULLS_VAL ((1<<30)+1)
86913 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
86914
86915+#ifdef CONFIG_GRKERNSEC_HIDESYM
86916+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
86917+#endif
86918+
86919 static int nf_conntrack_init_net(struct net *net)
86920 {
86921 int ret;
86922@@ -1543,7 +1547,11 @@ static int nf_conntrack_init_net(struct net *net)
86923 goto err_stat;
86924 }
86925
86926+#ifdef CONFIG_GRKERNSEC_HIDESYM
86927+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
86928+#else
86929 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
86930+#endif
86931 if (!net->ct.slabname) {
86932 ret = -ENOMEM;
86933 goto err_slabname;
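The HIDESYM branch above names the per-namespace conntrack slab cache after a monotonically increasing counter instead of the net pointer, since the "%p"-derived name is visible in /proc/slabinfo and would leak a kernel address. A user-space sketch of the replacement scheme, with asprintf() standing in for kasprintf():

    #define _GNU_SOURCE
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong cache_id;

    char *make_cache_name(void)
    {
        char *name;
        unsigned long id = atomic_fetch_add(&cache_id, 1) + 1;

        if (asprintf(&name, "nf_conntrack_%08lx", id) < 0)
            return NULL;    /* caller frees on success, as with kasprintf() */
        return name;
    }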
86934diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
86935index faa978f..1afb18f 100644
86936--- a/net/netfilter/nf_conntrack_ecache.c
86937+++ b/net/netfilter/nf_conntrack_ecache.c
86938@@ -186,7 +186,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
86939 #ifdef CONFIG_SYSCTL
86940 static int nf_conntrack_event_init_sysctl(struct net *net)
86941 {
86942- struct ctl_table *table;
86943+ ctl_table_no_const *table;
86944
86945 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
86946 GFP_KERNEL);
86947diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
86948index 884f2b3..d53b33a 100644
86949--- a/net/netfilter/nf_conntrack_helper.c
86950+++ b/net/netfilter/nf_conntrack_helper.c
86951@@ -55,7 +55,7 @@ static struct ctl_table helper_sysctl_table[] = {
86952
86953 static int nf_conntrack_helper_init_sysctl(struct net *net)
86954 {
86955- struct ctl_table *table;
86956+ ctl_table_no_const *table;
86957
86958 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
86959 GFP_KERNEL);
86960diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
86961index 51e928d..72a413a 100644
86962--- a/net/netfilter/nf_conntrack_proto.c
86963+++ b/net/netfilter/nf_conntrack_proto.c
86964@@ -51,7 +51,7 @@ nf_ct_register_sysctl(struct net *net,
86965
86966 static void
86967 nf_ct_unregister_sysctl(struct ctl_table_header **header,
86968- struct ctl_table **table,
86969+ ctl_table_no_const **table,
86970 unsigned int users)
86971 {
86972 if (users > 0)
86973diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
86974index e7185c6..4ad6c9c 100644
86975--- a/net/netfilter/nf_conntrack_standalone.c
86976+++ b/net/netfilter/nf_conntrack_standalone.c
86977@@ -470,7 +470,7 @@ static ctl_table nf_ct_netfilter_table[] = {
86978
86979 static int nf_conntrack_standalone_init_sysctl(struct net *net)
86980 {
86981- struct ctl_table *table;
86982+ ctl_table_no_const *table;
86983
86984 if (net_eq(net, &init_net)) {
86985 nf_ct_netfilter_header =
86986diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
86987index 7ea8026..bc9512d 100644
86988--- a/net/netfilter/nf_conntrack_timestamp.c
86989+++ b/net/netfilter/nf_conntrack_timestamp.c
86990@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
86991 #ifdef CONFIG_SYSCTL
86992 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
86993 {
86994- struct ctl_table *table;
86995+ ctl_table_no_const *table;
86996
86997 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
86998 GFP_KERNEL);
86999diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
87000index 9e31269..bc4c1b7 100644
87001--- a/net/netfilter/nf_log.c
87002+++ b/net/netfilter/nf_log.c
87003@@ -215,7 +215,7 @@ static const struct file_operations nflog_file_ops = {
87004
87005 #ifdef CONFIG_SYSCTL
87006 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
87007-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
87008+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
87009 static struct ctl_table_header *nf_log_dir_header;
87010
87011 static int nf_log_proc_dostring(ctl_table *table, int write,
87012@@ -246,14 +246,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
87013 rcu_assign_pointer(nf_loggers[tindex], logger);
87014 mutex_unlock(&nf_log_mutex);
87015 } else {
87016+ ctl_table_no_const nf_log_table = *table;
87017+
87018 mutex_lock(&nf_log_mutex);
87019 logger = rcu_dereference_protected(nf_loggers[tindex],
87020 lockdep_is_held(&nf_log_mutex));
87021 if (!logger)
87022- table->data = "NONE";
87023+ nf_log_table.data = "NONE";
87024 else
87025- table->data = logger->name;
87026- r = proc_dostring(table, write, buffer, lenp, ppos);
87027+ nf_log_table.data = logger->name;
87028+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
87029 mutex_unlock(&nf_log_mutex);
87030 }
87031
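In the nf_log.c read path above, the handler used to repoint table->data at the logger name before calling proc_dostring(), which writes through the registered table; with that table now read-only, the hunk fills in a stack copy instead and passes the copy down. The same move in a self-contained sketch (the table layout and do_string() are illustrative stand-ins):

    #include <stdio.h>

    struct table { const char *procname; const char *data; };

    static int do_string(const struct table *t) /* stand-in for proc_dostring() */
    {
        return printf("%s = %s\n", t->procname, t->data) < 0 ? -1 : 0;
    }

    int read_logger_name(const struct table *registered, const char *logger_name)
    {
        struct table tmp = *registered; /* mutable copy; *registered stays read-only */

        tmp.data = logger_name ? logger_name : "NONE";
        return do_string(&tmp);
    }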
87032diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
87033index f042ae5..30ea486 100644
87034--- a/net/netfilter/nf_sockopt.c
87035+++ b/net/netfilter/nf_sockopt.c
87036@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
87037 }
87038 }
87039
87040- list_add(&reg->list, &nf_sockopts);
87041+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
87042 out:
87043 mutex_unlock(&nf_sockopt_mutex);
87044 return ret;
87045@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
87046 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
87047 {
87048 mutex_lock(&nf_sockopt_mutex);
87049- list_del(&reg->list);
87050+ pax_list_del((struct list_head *)&reg->list);
87051 mutex_unlock(&nf_sockopt_mutex);
87052 }
87053 EXPORT_SYMBOL(nf_unregister_sockopt);
87054diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
87055index 92fd8ec..3f6ea4b 100644
87056--- a/net/netfilter/nfnetlink_log.c
87057+++ b/net/netfilter/nfnetlink_log.c
87058@@ -72,7 +72,7 @@ struct nfulnl_instance {
87059 };
87060
87061 static DEFINE_SPINLOCK(instances_lock);
87062-static atomic_t global_seq;
87063+static atomic_unchecked_t global_seq;
87064
87065 #define INSTANCE_BUCKETS 16
87066 static struct hlist_head instance_table[INSTANCE_BUCKETS];
87067@@ -537,7 +537,7 @@ __build_packet_message(struct nfulnl_instance *inst,
87068 /* global sequence number */
87069 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
87070 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
87071- htonl(atomic_inc_return(&global_seq))))
87072+ htonl(atomic_inc_return_unchecked(&global_seq))))
87073 goto nla_put_failure;
87074
87075 if (data_len) {
87076diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
87077new file mode 100644
87078index 0000000..c566332
87079--- /dev/null
87080+++ b/net/netfilter/xt_gradm.c
87081@@ -0,0 +1,51 @@
87082+/*
87083+ * gradm match for netfilter
87084